source
stringlengths
3
92
c
stringlengths
26
2.25M
npb_cg.c
# 1 "main.c" # 0 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header> # 21 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header> # 1 "main.c" # 4 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> # 27 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> typedef int omp_lock_t ; # 29 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> # 31 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> struct omp_nest_lock { omp_lock_t act ; short cnt ; short tid ; } ; # 37 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> typedef struct omp_nest_lock omp_nest_lock_t ; # 41 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> typedef enum omp_sched_t { omp_sched_static = 1 , omp_sched_dynamic = 2 , omp_sched_guided = 3 , omp_sched_auto = 4 } omp_sched_t ; # 52 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> extern void omp_set_num_threads ( int n ) ; extern int omp_get_thread_num ( void ) ; extern int omp_get_num_procs ( void ) ; extern int omp_get_num_threads ( void ) ; extern int omp_get_max_threads ( void ) ; extern int omp_in_parallel ( void ) ; extern int omp_in_final ( void ) ; extern void omp_set_dynamic ( int n ) ; extern int omp_get_dynamic ( void ) ; extern void omp_set_nested ( int n ) ; extern int omp_get_nested ( void ) ; extern void omp_init_lock ( omp_lock_t * s ) ; extern void omp_destroy_lock ( omp_lock_t * s ) ; extern void omp_set_lock ( omp_lock_t * s ) ; extern void omp_unset_lock ( omp_lock_t * s ) ; extern int omp_test_lock ( omp_lock_t * s ) ; extern void omp_init_nest_lock ( omp_nest_lock_t * s ) ; extern void omp_destroy_nest_lock ( omp_nest_lock_t * s ) ; extern void omp_set_nest_lock ( 
omp_nest_lock_t * s ) ; extern void omp_unset_nest_lock ( omp_nest_lock_t * s ) ; extern int omp_test_nest_lock ( omp_nest_lock_t * s ) ; extern double omp_get_wtime ( void ) ; extern double omp_get_wtick ( void ) ; extern long omp_get_stack_size ( void ) ; extern void omp_set_stack_size ( long l ) ; extern int omp_get_thread_limit ( void ) ; extern void omp_set_max_active_levels ( int ) ; extern int omp_get_max_active_levels ( void ) ; extern int omp_get_level ( void ) ; extern int omp_get_ancestor_thread_num ( int ) ; extern int omp_get_team_size ( int ) ; extern int omp_get_active_level ( void ) ; extern void omp_set_schedule ( omp_sched_t , int ) ; extern void omp_get_schedule ( omp_sched_t * , int * ) ; extern int omp_get_initial_device ( ) ; extern int omp_get_default_device ( ) ; extern void omp_set_default_device ( int ) ; # 89 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 1 "/usr/include/stdlib.h" <System_Header> # 16 "/usr/include/stdlib.h" <System_Header> # 20 "/usr/include/stdlib.h" <System_Header> # 24 "/usr/include/stdlib.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 93 "/usr/include/features.h" <System_Header> # 96 "/usr/include/features.h" <System_Header> # 123 "/usr/include/features.h" <System_Header> # 134 "/usr/include/features.h" <System_Header> # 145 "/usr/include/features.h" <System_Header> # 156 "/usr/include/features.h" <System_Header> # 181 "/usr/include/features.h" <System_Header> # 191 
"/usr/include/features.h" <System_Header> # 197 "/usr/include/features.h" <System_Header> # 203 "/usr/include/features.h" <System_Header> # 212 "/usr/include/features.h" <System_Header> # 220 "/usr/include/features.h" <System_Header> # 344 "/usr/include/features.h" <System_Header> # 345 "/usr/include/features.h" <System_Header> # 1 "/usr/include/stdc-predef.h" <System_Header> # 16 "/usr/include/stdc-predef.h" <System_Header> # 27 "/usr/include/stdc-predef.h" <System_Header> # 34 "/usr/include/stdc-predef.h" <System_Header> # 54 "/usr/include/stdc-predef.h" <System_Header> # 57 "/usr/include/stdc-predef.h" <System_Header> # 346 "/usr/include/features.h" <System_Header> # 352 "/usr/include/features.h" <System_Header> # 357 "/usr/include/features.h" <System_Header> # 364 "/usr/include/features.h" <System_Header> # 367 "/usr/include/features.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 28 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 33 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 81 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 86 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 91 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 96 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 110 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 121 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 131 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 147 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 173 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" 
<System_Header> # 202 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 209 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 217 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 227 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 234 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 243 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 252 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 264 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 274 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 283 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 291 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 305 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 313 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 328 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 347 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 356 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 361 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 368 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 410 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 411 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header> # 15 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header> # 18 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header> # 31 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header> # 45 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header> # 368 "/usr/include/features.h" <System_Header> # 371 "/usr/include/features.h" <System_Header> # 379 "/usr/include/features.h" <System_Header> # 390 "/usr/include/features.h" <System_Header> # 391 "/usr/include/features.h" 
<System_Header> # 1 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header> # 3 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header> # 10 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" <System_Header> # 4 "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" <System_Header> # 11 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header> # 392 "/usr/include/features.h" <System_Header> # 25 "/usr/include/stdlib.h" <System_Header> # 26 "/usr/include/stdlib.h" <System_Header> # 32 "/usr/include/stdlib.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 216 
"/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> typedef unsigned long int size_t ; # 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 292 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 312 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 328 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> typedef int wchar_t ; # 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 33 "/usr/include/stdlib.h" <System_Header> # 40 "/usr/include/stdlib.h" <System_Header> # 41 "/usr/include/stdlib.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header> # 24 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header> # 28 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header> # 39 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header> # 45 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header> # 50 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header> typedef enum { P_ALL , P_PID , P_PGID } idtype_t ; # 42 "/usr/include/stdlib.h" <System_Header> # 42 "/usr/include/stdlib.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 24 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 30 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 33 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 36 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 39 
"/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 43 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 47 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 52 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 55 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 64 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 1 "/usr/include/endian.h" <System_Header> # 16 "/usr/include/endian.h" <System_Header> # 21 "/usr/include/endian.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 22 "/usr/include/endian.h" <System_Header> # 29 "/usr/include/endian.h" <System_Header> # 35 "/usr/include/endian.h" <System_Header> # 36 "/usr/include/endian.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/endian.h" <System_Header> # 37 "/usr/include/endian.h" <System_Header> # 39 "/usr/include/endian.h" <System_Header> # 59 "/usr/include/endian.h" <System_Header> # 60 "/usr/include/endian.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> 
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 28 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 29 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef unsigned char __u_char ; typedef unsigned short int __u_short ; typedef unsigned int __u_int ; typedef unsigned long int __u_long ; # 35 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef signed char __int8_t ; typedef unsigned char __uint8_t ; typedef signed short int __int16_t ; typedef unsigned short int __uint16_t ; typedef signed int __int32_t ; typedef unsigned int __uint32_t ; # 43 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef signed long int __int64_t ; typedef unsigned long int __uint64_t ; # 50 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 52 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef long int __quad_t ; typedef unsigned long int __u_quad_t ; # 87 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 116 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 121 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header> # 29 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header> # 79 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header> # 82 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header> # 86 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header> # 122 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 124 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef unsigned long int __dev_t ; typedef unsigned int __uid_t ; typedef unsigned int __gid_t ; typedef unsigned long int __ino_t ; typedef unsigned 
long int __ino64_t ; typedef unsigned int __mode_t ; typedef unsigned long int __nlink_t ; typedef long int __off_t ; typedef long int __off64_t ; typedef int __pid_t ; typedef struct { int __val [ 2 ] ; } __fsid_t ; typedef long int __clock_t ; typedef unsigned long int __rlim_t ; typedef unsigned long int __rlim64_t ; typedef unsigned int __id_t ; typedef long int __time_t ; typedef unsigned int __useconds_t ; typedef long int __suseconds_t ; # 143 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef int __daddr_t ; typedef int __key_t ; # 146 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef int __clockid_t ; # 149 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef void * __timer_t ; # 152 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef long int __blksize_t ; # 155 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 157 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef long int __blkcnt_t ; typedef long int __blkcnt64_t ; # 161 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef unsigned long int __fsblkcnt_t ; typedef unsigned long int __fsblkcnt64_t ; # 165 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef unsigned long int __fsfilcnt_t ; typedef unsigned long int __fsfilcnt64_t ; # 169 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef long int __fsword_t ; # 172 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef long int __ssize_t ; # 174 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef long int __syscall_slong_t ; typedef unsigned long int __syscall_ulong_t ; # 180 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef __off64_t __loff_t ; typedef __quad_t * __qaddr_t ; typedef char * __caddr_t ; # 185 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> typedef long int __intptr_t ; # 188 "/usr/include/x86_64-linux-gnu/bits/types.h" 
<System_Header> typedef unsigned int __socklen_t ; # 28 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 28 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 29 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 30 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 34 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 35 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header> # 44 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header> static unsigned short int __bswap_16 ( unsigned short int __bsx ) { return ( ( unsigned short int ) ( ( ( ( __bsx ) >> 8 ) & 0xff ) | ( ( ( __bsx ) & 0xff ) << 8 ) ) ) ; } # 36 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 37 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> # 87 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> static unsigned int __bswap_32 ( unsigned int __bsx ) { return ( ( ( ( __bsx ) & 0xff000000 ) >> 24 ) | ( ( ( __bsx ) & 0x00ff0000 ) >> 8 ) | ( ( ( __bsx ) & 0x0000ff00 ) << 8 ) | ( ( ( __bsx ) & 0x000000ff ) << 24 ) ) ; } # 148 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header> static __uint64_t __bswap_64 ( __uint64_t __bsx ) { return ( ( ( ( __bsx ) & 0xff00000000000000ull ) >> 56 ) | ( ( ( __bsx ) & 0x00ff000000000000ull ) >> 40 ) | ( ( ( __bsx ) & 0x0000ff0000000000ull ) >> 24 ) | ( ( ( __bsx ) & 0x000000ff00000000ull ) >> 8 ) | ( ( ( __bsx ) & 0x00000000ff000000ull ) << 8 ) | ( ( ( __bsx ) & 0x0000000000ff0000ull ) << 24 ) | ( ( ( __bsx ) & 0x000000000000ff00ull ) << 40 ) | ( ( ( __bsx ) & 0x00000000000000ffull ) << 56 ) ) ; } # 61 "/usr/include/endian.h" <System_Header> # 
65 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> # 66 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> union wait { int w_status ; struct { # 72 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> unsigned int __w_termsig : 7 ; unsigned int __w_coredump : 1 ; unsigned int __w_retcode : 8 ; unsigned int : 16 ; # 83 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> } __wait_terminated ; struct { # 87 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> unsigned int __w_stopval : 8 ; unsigned int __w_stopsig : 8 ; unsigned int : 16 ; # 96 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header> } __wait_stopped ; } ; # 43 "/usr/include/stdlib.h" <System_Header> # 47 "/usr/include/stdlib.h" <System_Header> # 60 "/usr/include/stdlib.h" <System_Header> # 83 "/usr/include/stdlib.h" <System_Header> # 96 "/usr/include/stdlib.h" <System_Header> typedef struct { int quot ; int rem ; } div_t ; # 103 "/usr/include/stdlib.h" <System_Header> # 105 "/usr/include/stdlib.h" <System_Header> typedef struct { long int quot ; long int rem ; } ldiv_t ; # 116 "/usr/include/stdlib.h" <System_Header> typedef struct { long long int quot ; long long int rem ; } lldiv_t ; # 127 "/usr/include/stdlib.h" <System_Header> # 132 "/usr/include/stdlib.h" <System_Header> # 137 "/usr/include/stdlib.h" <System_Header> # 139 "/usr/include/stdlib.h" <System_Header> extern size_t __ctype_get_mb_cur_max ( void ) ; # 143 "/usr/include/stdlib.h" <System_Header> extern double atof ( const char * __nptr ) ; extern int atoi ( const char * __nptr ) ; extern long int atol ( const char * __nptr ) ; # 156 "/usr/include/stdlib.h" <System_Header> extern long long int atoll ( const char * __nptr ) ; # 163 "/usr/include/stdlib.h" <System_Header> extern double strtod ( const char * __restrict __nptr , char * * __restrict __endptr ) ; # 171 "/usr/include/stdlib.h" <System_Header> extern float strtof ( const char * __restrict __nptr , 
char * * __restrict __endptr ) ; # 175 "/usr/include/stdlib.h" <System_Header> extern long double strtold ( const char * __restrict __nptr , char * * __restrict __endptr ) ; # 182 "/usr/include/stdlib.h" <System_Header> extern long int strtol ( const char * __restrict __nptr , char * * __restrict __endptr , int __base ) ; extern unsigned long int strtoul ( const char * __restrict __nptr , char * * __restrict __endptr , int __base ) ; # 193 "/usr/include/stdlib.h" <System_Header> # 195 "/usr/include/stdlib.h" <System_Header> extern long long int strtoq ( const char * __restrict __nptr , char * * __restrict __endptr , int __base ) ; # 200 "/usr/include/stdlib.h" <System_Header> extern unsigned long long int strtouq ( const char * __restrict __nptr , char * * __restrict __endptr , int __base ) ; # 207 "/usr/include/stdlib.h" <System_Header> # 209 "/usr/include/stdlib.h" <System_Header> extern long long int strtoll ( const char * __restrict __nptr , char * * __restrict __endptr , int __base ) ; # 214 "/usr/include/stdlib.h" <System_Header> extern unsigned long long int strtoull ( const char * __restrict __nptr , char * * __restrict __endptr , int __base ) ; # 304 "/usr/include/stdlib.h" <System_Header> extern char * l64a ( long int __n ) ; # 307 "/usr/include/stdlib.h" <System_Header> extern long int a64l ( const char * __s ) ; # 314 "/usr/include/stdlib.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 20 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 25 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 29 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" 
<System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 30 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 33 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __u_char u_char ; typedef __u_short u_short ; typedef __u_int u_int ; typedef __u_long u_long ; typedef __quad_t quad_t ; typedef __u_quad_t u_quad_t ; typedef __fsid_t fsid_t ; # 44 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __loff_t loff_t ; # 48 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __ino_t ino_t ; # 60 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __dev_t dev_t ; # 65 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __gid_t gid_t ; # 70 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __mode_t mode_t ; # 75 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __nlink_t nlink_t ; # 80 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __uid_t uid_t ; # 86 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __off_t off_t ; # 98 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __pid_t pid_t ; # 104 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __id_t id_t ; # 109 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __ssize_t ssize_t ; # 115 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __daddr_t daddr_t ; typedef __caddr_t caddr_t ; # 122 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __key_t key_t ; # 132 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 1 "/usr/include/time.h" <System_Header> # 16 "/usr/include/time.h" <System_Header> # 20 "/usr/include/time.h" <System_Header> # 55 "/usr/include/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 
"/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 56 "/usr/include/time.h" <System_Header> # 58 "/usr/include/time.h" <System_Header> typedef __clock_t clock_t ; # 71 "/usr/include/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 72 "/usr/include/time.h" <System_Header> # 74 "/usr/include/time.h" <System_Header> typedef __time_t time_t ; # 88 "/usr/include/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 89 "/usr/include/time.h" <System_Header> # 90 "/usr/include/time.h" <System_Header> typedef __clockid_t clockid_t ; # 100 "/usr/include/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 101 "/usr/include/time.h" <System_Header> # 102 "/usr/include/time.h" <System_Header> typedef __timer_t timer_t ; # 133 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 146 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 47 
"/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 147 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 149 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef unsigned long int ulong ; typedef unsigned short int ushort ; typedef unsigned int uint ; # 155 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 159 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 162 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef char int8_t ; typedef short int int16_t ; typedef int int32_t ; # 166 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef long int int64_t ; # 172 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef unsigned char u_int8_t ; typedef unsigned short int u_int16_t ; typedef unsigned int u_int32_t ; # 177 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef 
unsigned long int u_int64_t ; # 182 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef int register_t ; # 215 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 216 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 1 "/usr/include/endian.h" <System_Header> # 16 "/usr/include/endian.h" <System_Header> # 217 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 218 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 219 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 19 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 24 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 25 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 28 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 29 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 30 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header> # 22 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 23 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header> # 47 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header> # 31 
"/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 32 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 33 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header> # 22 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header> typedef int __sig_atomic_t ; # 24 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header> typedef struct { unsigned long int __val [ ( 1024 / ( 8 * sizeof ( unsigned long int ) ) ) ] ; } __sigset_t ; # 39 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header> # 34 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 37 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> typedef __sigset_t sigset_t ; # 40 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 43 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 1 "/usr/include/time.h" <System_Header> # 16 "/usr/include/time.h" <System_Header> # 20 "/usr/include/time.h" <System_Header> # 116 "/usr/include/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 117 "/usr/include/time.h" <System_Header> # 119 "/usr/include/time.h" <System_Header> struct timespec { __time_t tv_sec ; __syscall_slong_t tv_nsec ; } ; # 44 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 45 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" 
<System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 29 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> struct timeval { __time_t tv_sec ; __suseconds_t tv_usec ; } ; # 46 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 48 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> typedef __suseconds_t suseconds_t ; # 53 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> typedef long int __fd_mask ; # 56 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 58 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 63 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> typedef struct { # 72 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> __fd_mask __fds_bits [ 1024 / ( 8 * ( int ) sizeof ( __fd_mask ) ) ] ; # 75 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> } fd_set ; # 77 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 81 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> typedef __fd_mask fd_mask ; # 84 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 89 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 105 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> extern int select ( int __nfds , fd_set * __restrict __readfds , fd_set * __restrict __writefds , fd_set * __restrict __exceptfds , struct timeval * __restrict __timeout ) ; # 117 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> extern int pselect ( int __nfds , fd_set * __restrict __readfds , fd_set * __restrict __writefds , fd_set * __restrict __exceptfds , const struct timespec * __restrict __timeout , const __sigset_t * __restrict __sigmask ) ; # 126 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 220 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 221 
"/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 222 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header> # 22 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 23 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header> extern unsigned int gnu_dev_major ( unsigned long long int __dev ) ; # 30 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header> extern unsigned int gnu_dev_minor ( unsigned long long int __dev ) ; # 33 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header> extern unsigned long long int gnu_dev_makedev ( unsigned int __major , unsigned int __minor ) ; # 60 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header> # 223 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 228 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __blksize_t blksize_t ; # 232 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 235 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __blkcnt_t blkcnt_t ; # 239 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __fsblkcnt_t fsblkcnt_t ; # 243 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> typedef __fsfilcnt_t fsfilcnt_t ; # 268 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 270 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 11 
"/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 22 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> # 59 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef unsigned long int pthread_t ; # 63 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> union pthread_attr_t { char __size [ 56 ] ; long int __align ; } ; # 69 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union pthread_attr_t pthread_attr_t ; # 75 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef struct __pthread_internal_list { struct __pthread_internal_list * __prev ; struct __pthread_internal_list * __next ; } __pthread_list_t ; # 89 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union { struct __pthread_mutex_s { int __lock ; unsigned int __count ; int __owner ; # 98 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> unsigned int __nusers ; # 100 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> int __kind ; # 104 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> short __spins ; short __elision ; __pthread_list_t __list ; # 108 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> # 125 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> } __data ; char __size [ 40 ] ; long int __align ; } pthread_mutex_t ; # 130 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union { char __size [ 4 ] ; int __align ; } pthread_mutexattr_t ; # 138 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union { struct { int __lock ; unsigned int __futex ; unsigned long long int __total_seq ; unsigned long long int __wakeup_seq ; unsigned long long int __woken_seq ; void * __mutex ; unsigned int __nwaiters ; unsigned int __broadcast_seq ; } __data ; char __size [ 48 ] ; long long int __align ; } pthread_cond_t ; # 156 
"/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union { char __size [ 4 ] ; int __align ; } pthread_condattr_t ; # 163 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef unsigned int pthread_key_t ; # 167 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef int pthread_once_t ; # 173 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union { # 177 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> struct { int __lock ; unsigned int __nr_readers ; unsigned int __readers_wakeup ; unsigned int __writer_wakeup ; unsigned int __nr_readers_queued ; unsigned int __nr_writers_queued ; int __writer ; int __shared ; signed char __rwelision ; # 192 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> unsigned char __pad1 [ 7 ] ; # 195 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> unsigned long int __pad2 ; unsigned int __flags ; # 200 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> } __data ; # 220 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> char __size [ 56 ] ; long int __align ; } pthread_rwlock_t ; # 224 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union { char __size [ 8 ] ; long int __align ; } pthread_rwlockattr_t ; # 233 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef volatile int pthread_spinlock_t ; # 238 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union { char __size [ 32 ] ; long int __align ; } pthread_barrier_t ; # 245 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header> typedef union { char __size [ 4 ] ; int __align ; } pthread_barrierattr_t ; # 271 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header> # 315 "/usr/include/stdlib.h" <System_Header> # 319 "/usr/include/stdlib.h" <System_Header> extern long int random ( void ) ; # 323 "/usr/include/stdlib.h" 
<System_Header> extern void srandom ( unsigned int __seed ) ; # 329 "/usr/include/stdlib.h" <System_Header> extern char * initstate ( unsigned int __seed , char * __statebuf , size_t __statelen ) ; # 334 "/usr/include/stdlib.h" <System_Header> extern char * setstate ( char * __statebuf ) ; # 341 "/usr/include/stdlib.h" <System_Header> # 343 "/usr/include/stdlib.h" <System_Header> struct random_data { int32_t * fptr ; int32_t * rptr ; int32_t * state ; int rand_type ; int rand_deg ; int rand_sep ; int32_t * end_ptr ; } ; # 354 "/usr/include/stdlib.h" <System_Header> extern int random_r ( struct random_data * __restrict __buf , int32_t * __restrict __result ) ; # 357 "/usr/include/stdlib.h" <System_Header> extern int srandom_r ( unsigned int __seed , struct random_data * __buf ) ; # 360 "/usr/include/stdlib.h" <System_Header> extern int initstate_r ( unsigned int __seed , char * __restrict __statebuf , size_t __statelen , struct random_data * __restrict __buf ) ; # 365 "/usr/include/stdlib.h" <System_Header> extern int setstate_r ( char * __restrict __statebuf , struct random_data * __restrict __buf ) ; # 373 "/usr/include/stdlib.h" <System_Header> extern int rand ( void ) ; extern void srand ( unsigned int __seed ) ; # 380 "/usr/include/stdlib.h" <System_Header> extern int rand_r ( unsigned int * __seed ) ; # 386 "/usr/include/stdlib.h" <System_Header> # 388 "/usr/include/stdlib.h" <System_Header> extern double drand48 ( void ) ; extern double erand48 ( unsigned short int __xsubi [ 3 ] ) ; # 392 "/usr/include/stdlib.h" <System_Header> extern long int lrand48 ( void ) ; extern long int nrand48 ( unsigned short int __xsubi [ 3 ] ) ; # 397 "/usr/include/stdlib.h" <System_Header> extern long int mrand48 ( void ) ; extern long int jrand48 ( unsigned short int __xsubi [ 3 ] ) ; # 402 "/usr/include/stdlib.h" <System_Header> extern void srand48 ( long int __seedval ) ; extern unsigned short int * seed48 ( unsigned short int __seed16v [ 3 ] ) ; extern void lcong48 ( unsigned 
short int __param [ 7 ] ) ; # 411 "/usr/include/stdlib.h" <System_Header> struct drand48_data { unsigned short int __x [ 3 ] ; unsigned short int __old_x [ 3 ] ; unsigned short int __c ; unsigned short int __init ; unsigned long long int __a ; } ; # 422 "/usr/include/stdlib.h" <System_Header> extern int drand48_r ( struct drand48_data * __restrict __buffer , double * __restrict __result ) ; extern int erand48_r ( unsigned short int __xsubi [ 3 ] , struct drand48_data * __restrict __buffer , double * __restrict __result ) ; # 429 "/usr/include/stdlib.h" <System_Header> extern int lrand48_r ( struct drand48_data * __restrict __buffer , long int * __restrict __result ) ; extern int nrand48_r ( unsigned short int __xsubi [ 3 ] , struct drand48_data * __restrict __buffer , long int * __restrict __result ) ; # 438 "/usr/include/stdlib.h" <System_Header> extern int mrand48_r ( struct drand48_data * __restrict __buffer , long int * __restrict __result ) ; extern int jrand48_r ( unsigned short int __xsubi [ 3 ] , struct drand48_data * __restrict __buffer , long int * __restrict __result ) ; # 447 "/usr/include/stdlib.h" <System_Header> extern int srand48_r ( long int __seedval , struct drand48_data * __buffer ) ; # 451 "/usr/include/stdlib.h" <System_Header> extern int seed48_r ( unsigned short int __seed16v [ 3 ] , struct drand48_data * __buffer ) ; # 454 "/usr/include/stdlib.h" <System_Header> extern int lcong48_r ( unsigned short int __param [ 7 ] , struct drand48_data * __buffer ) ; # 465 "/usr/include/stdlib.h" <System_Header> extern void * malloc ( size_t __size ) ; extern void * calloc ( size_t __nmemb , size_t __size ) ; # 476 "/usr/include/stdlib.h" <System_Header> # 479 "/usr/include/stdlib.h" <System_Header> extern void * realloc ( void * __ptr , size_t __size ) ; extern void free ( void * __ptr ) ; # 487 "/usr/include/stdlib.h" <System_Header> extern void cfree ( void * __ptr ) ; # 492 "/usr/include/stdlib.h" <System_Header> # 1 
"/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> # 22 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 23 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> # 25 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" 
<System_Header> # 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 26 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> # 29 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> # 33 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> # 39 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header> extern void * __alloca ( size_t __size ) ; extern void * alloca ( size_t __size ) ; extern void * __builtin_alloca ( size_t __size ) ; # 493 "/usr/include/stdlib.h" <System_Header> # 497 "/usr/include/stdlib.h" <System_Header> extern void * valloc ( size_t __size ) ; # 502 "/usr/include/stdlib.h" <System_Header> extern int posix_memalign ( void * * __memptr , size_t __alignment , size_t __size ) ; # 514 "/usr/include/stdlib.h" <System_Header> extern void abort ( void ) __attribute__ ( ( __noreturn__ ) ) ; # 518 "/usr/include/stdlib.h" <System_Header> extern int atexit ( void ( * __func ) ( void ) ) ; # 534 "/usr/include/stdlib.h" <System_Header> extern int on_exit ( void ( * __func ) ( int __status , void * __arg ) , void * __arg ) ; # 542 "/usr/include/stdlib.h" <System_Header> extern void exit ( int __status ) __attribute__ ( ( __noreturn__ ) ) ; # 556 "/usr/include/stdlib.h" <System_Header> extern void _Exit ( int __status ) __attribute__ ( ( __noreturn__ ) ) ; # 563 "/usr/include/stdlib.h" <System_Header> extern char * getenv ( const char * __name ) ; # 575 "/usr/include/stdlib.h" <System_Header> # 577 "/usr/include/stdlib.h" <System_Header> extern int putenv ( char * __string ) ; # 583 "/usr/include/stdlib.h" <System_Header> extern int setenv ( const char * __name , const char * __value , int __replace ) ; # 587 "/usr/include/stdlib.h" <System_Header> extern int unsetenv ( const char * __name ) ; # 594 "/usr/include/stdlib.h" <System_Header> extern int clearenv ( void ) ; # 605 "/usr/include/stdlib.h" <System_Header> extern char * mktemp ( 
char * __template ) ; # 617 "/usr/include/stdlib.h" <System_Header> # 619 "/usr/include/stdlib.h" <System_Header> extern int mkstemp ( char * __template ) ; # 639 "/usr/include/stdlib.h" <System_Header> # 641 "/usr/include/stdlib.h" <System_Header> extern int mkstemps ( char * __template , int __suffixlen ) ; # 661 "/usr/include/stdlib.h" <System_Header> extern char * mkdtemp ( char * __template ) ; # 715 "/usr/include/stdlib.h" <System_Header> extern int system ( const char * __command ) ; # 732 "/usr/include/stdlib.h" <System_Header> extern char * realpath ( const char * __restrict __name , char * __restrict __resolved ) ; # 738 "/usr/include/stdlib.h" <System_Header> # 741 "/usr/include/stdlib.h" <System_Header> typedef int ( * __compar_fn_t ) ( const void * , const void * ) ; # 753 "/usr/include/stdlib.h" <System_Header> extern void * bsearch ( const void * __key , const void * __base , size_t __nmemb , size_t __size , __compar_fn_t __compar ) ; # 763 "/usr/include/stdlib.h" <System_Header> extern void qsort ( void * __base , size_t __nmemb , size_t __size , __compar_fn_t __compar ) ; # 773 "/usr/include/stdlib.h" <System_Header> extern int abs ( int __x ) __attribute__ ( ( __const__ ) ) ; extern long int labs ( long int __x ) __attribute__ ( ( __const__ ) ) ; # 779 "/usr/include/stdlib.h" <System_Header> extern long long int llabs ( long long int __x ) __attribute__ ( ( __const__ ) ) ; # 786 "/usr/include/stdlib.h" <System_Header> extern div_t div ( int __numer , int __denom ) __attribute__ ( ( __const__ ) ) ; extern ldiv_t ldiv ( long int __numer , long int __denom ) __attribute__ ( ( __const__ ) ) ; # 796 "/usr/include/stdlib.h" <System_Header> extern lldiv_t lldiv ( long long int __numer , long long int __denom ) __attribute__ ( ( __const__ ) ) ; # 806 "/usr/include/stdlib.h" <System_Header> # 810 "/usr/include/stdlib.h" <System_Header> extern char * ecvt ( double __value , int __ndigit , int * __restrict __decpt , int * __restrict __sign ) ; # 816 
"/usr/include/stdlib.h" <System_Header> extern char * fcvt ( double __value , int __ndigit , int * __restrict __decpt , int * __restrict __sign ) ; # 822 "/usr/include/stdlib.h" <System_Header> extern char * gcvt ( double __value , int __ndigit , char * __buf ) ; # 828 "/usr/include/stdlib.h" <System_Header> extern char * qecvt ( long double __value , int __ndigit , int * __restrict __decpt , int * __restrict __sign ) ; extern char * qfcvt ( long double __value , int __ndigit , int * __restrict __decpt , int * __restrict __sign ) ; extern char * qgcvt ( long double __value , int __ndigit , char * __buf ) ; # 840 "/usr/include/stdlib.h" <System_Header> extern int ecvt_r ( double __value , int __ndigit , int * __restrict __decpt , int * __restrict __sign , char * __restrict __buf , size_t __len ) ; extern int fcvt_r ( double __value , int __ndigit , int * __restrict __decpt , int * __restrict __sign , char * __restrict __buf , size_t __len ) ; # 848 "/usr/include/stdlib.h" <System_Header> extern int qecvt_r ( long double __value , int __ndigit , int * __restrict __decpt , int * __restrict __sign , char * __restrict __buf , size_t __len ) ; extern int qfcvt_r ( long double __value , int __ndigit , int * __restrict __decpt , int * __restrict __sign , char * __restrict __buf , size_t __len ) ; # 861 "/usr/include/stdlib.h" <System_Header> extern int mblen ( const char * __s , size_t __n ) ; # 864 "/usr/include/stdlib.h" <System_Header> extern int mbtowc ( wchar_t * __restrict __pwc , const char * __restrict __s , size_t __n ) ; # 868 "/usr/include/stdlib.h" <System_Header> extern int wctomb ( char * __s , wchar_t __wchar ) ; # 872 "/usr/include/stdlib.h" <System_Header> extern size_t mbstowcs ( wchar_t * __restrict __pwcs , const char * __restrict __s , size_t __n ) ; extern size_t wcstombs ( char * __restrict __s , const wchar_t * __restrict __pwcs , size_t __n ) ; # 886 "/usr/include/stdlib.h" <System_Header> extern int rpmatch ( const char * __response ) ; # 897 
"/usr/include/stdlib.h" <System_Header> extern int getsubopt ( char * * __restrict __optionp , char * const * __restrict __tokens , char * * __restrict __valuep ) ; # 911 "/usr/include/stdlib.h" <System_Header> # 949 "/usr/include/stdlib.h" <System_Header> extern int getloadavg ( double __loadavg [ ] , int __nelem ) ; # 954 "/usr/include/stdlib.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/stdlib-float.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/stdlib-float.h" <System_Header> # 955 "/usr/include/stdlib.h" <System_Header> # 956 "/usr/include/stdlib.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 442 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 456 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 459 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> int __builtin_abs ( int ) ; # 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * malloc_managed ( size_t ) ; extern void * calloc_managed ( size_t , size_t ) ; extern void free_managed ( void * ) ; extern void cfree_managed ( void * ) ; # 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * realloc_managed ( void * , size_t ) ; extern void * valloc_managed ( size_t ) ; extern void * pvalloc_managed ( size_t ) ; extern void * memalign_managed ( size_t , size_t ) ; extern int posix_memalign_managed ( void * * , size_t , size_t ) ; extern char * tmpnam_managed ( char * ) ; # 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 90 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> # 90 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> extern void * omp_target_alloc ( size_t , int ) ; extern void omp_target_free ( void * , int ) ; extern int omp_target_memcpy ( void * , void * , size_t , 
size_t , size_t , int , int ) ; # 94 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header> typedef int _Atomic_word ; extern void _mp_atomic_add ( int * , int ) ; extern void _mp_exchange_and_add ( int * , int ) ; # 5 "main.c" # 5 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> # 27 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> typedef enum { acc_device_none = 0 , acc_device_default = 1 , acc_device_host = 2 , acc_device_not_host = 3 , acc_device_nvidia = 4 , acc_device_radeon = 5 , acc_device_xeonphi = 6 , acc_device_pgi_opencl = 7 , acc_device_nvidia_opencl = 8 , acc_device_opencl = 9 } acc_device_t ; # 45 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> void acc_set_default_async ( int async ) ; int acc_get_default_async ( void ) ; extern int acc_get_num_devices ( acc_device_t devtype ) ; extern acc_device_t acc_get_device ( void ) ; extern void acc_set_device_num ( int devnum , acc_device_t devtype ) ; extern int acc_get_device_num ( acc_device_t devtype ) ; extern void acc_init ( acc_device_t devtype ) ; extern void acc_shutdown ( acc_device_t devtype ) ; extern void acc_set_deviceid ( int devid ) ; extern int acc_get_deviceid ( int devnum , acc_device_t devtype ) ; extern int acc_async_test ( long async ) ; extern int acc_async_test_all ( void ) ; extern void acc_async_wait ( long async ) ; extern void acc_async_wait_all ( void ) ; extern void acc_wait ( long async ) ; extern void acc_wait_async ( long arg , long async ) ; extern void acc_wait_all ( void ) ; extern void acc_wait_all_async ( long async ) ; extern int acc_on_device ( acc_device_t devtype ) ; extern void acc_free ( void * ) ; # 66 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern void * acc_memcpy ( void * targetptr , void * srcptr , unsigned long bytes ) ; extern void * acc_memcpy_async ( void * targetptr , void * srcptr , unsigned long bytes , long 
async ) ; extern void * acc_copyin ( void * hostptr , unsigned long bytes ) ; extern void * acc_copyin_async ( void * hostptr , unsigned long bytes , long async ) ; extern void * acc_pcopyin ( void * hostptr , unsigned long bytes ) ; extern void * acc_pcopyin_async ( void * hostptr , unsigned long bytes , long async ) ; extern void * acc_present_or_copyin ( void * hostptr , unsigned long bytes ) ; extern void * acc_present_or_copyin_async ( void * hostptr , unsigned long bytes , long async ) ; extern void * acc_create ( void * hostptr , unsigned long bytes ) ; extern void * acc_create_async ( void * hostptr , unsigned long bytes , long async ) ; extern void * acc_pcreate ( void * hostptr , unsigned long bytes ) ; extern void * acc_pcreate_async ( void * hostptr , unsigned long bytes , long async ) ; extern void * acc_present_or_create ( void * hostptr , unsigned long bytes ) ; extern void * acc_present_or_create_async ( void * hostptr , unsigned long bytes , long async ) ; extern void acc_copyout ( void * hostptr , unsigned long bytes ) ; extern void acc_copyout_async ( void * hostptr , unsigned long bytes , long async ) ; extern void acc_delete ( void * hostptr , unsigned long bytes ) ; extern void acc_delete_async ( void * hostptr , unsigned long bytes , long async ) ; extern void acc_update_device ( void * hostptr , unsigned long bytes ) ; extern void acc_update_device_async ( void * hostptr , unsigned long bytes , long async ) ; extern void acc_update_self ( void * hostptr , unsigned long bytes ) ; extern void acc_update_self_async ( void * hostptr , unsigned long bytes , long async ) ; extern void acc_update_host ( void * hostptr , unsigned long bytes ) ; extern void acc_update_host_async ( void * hostptr , unsigned long bytes , long async ) ; extern void acc_memcpy_to_device ( void * devptr , void * hostptr , unsigned long bytes ) ; extern void acc_memcpy_to_device_async ( void * devptr , void * hostptr , unsigned long bytes , long async ) ; extern void 
acc_memcpy_from_device ( void * hostptr , void * devptr , unsigned long bytes ) ; extern void acc_memcpy_from_device_async ( void * hostptr , void * devptr , unsigned long bytes , long async ) ; extern void * acc_memcpy_device ( void * targetdevptr , void * srcdevptr , unsigned long bytes ) ; extern void * acc_memcpy_device_async ( void * targetdevptr , void * srcdevptr , unsigned long bytes , long async ) ; extern void acc_attach ( void * * hostptrptr ) ; extern void acc_attach_async ( void * * hostptrptr , long async ) ; extern void acc_detach ( void * * hostptrptr ) ; extern void acc_detach_async ( void * * hostptrptr , long async ) ; # 101 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern void acc_set_device_type ( acc_device_t devtype ) ; extern acc_device_t acc_get_device_type ( void ) ; extern void * acc_malloc ( unsigned long ) ; extern void * acc_deviceptr ( void * hostptr ) ; extern void * acc_hostptr ( void * devptr ) ; extern void acc_map_data ( void * hostptr , void * devptr , unsigned long bytes ) ; extern void acc_unmap_data ( void * hostptr ) ; extern int acc_is_present ( void * hostptr , unsigned long bytes ) ; extern int acc_present_count ( void * hostptr ) ; extern void acc_updatein ( void * hostptr , unsigned long bytes ) ; extern void acc_updatein_async ( void * hostptr , unsigned long bytes , long async ) ; extern void acc_updateout ( void * hostptr , unsigned long bytes ) ; extern void acc_updateout_async ( void * hostptr , unsigned long bytes , long async ) ; # 115 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern void * acc_get_current_cuda_context ( void ) ; extern int acc_get_current_cuda_device ( void ) ; extern void * acc_get_cuda_stream ( long ) ; extern void acc_set_cuda_stream ( long , void * ) ; extern void * acc_cuda_get_context ( int ) ; extern int acc_cuda_get_device ( int ) ; # 122 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern void * acc_get_current_opencl_context ( 
void ) ; extern void * acc_get_current_opencl_device ( void ) ; extern void * acc_get_opencl_queue ( long ) ; # 126 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomicaddi ( void * address , int val ) ; extern unsigned int atomicaddu ( void * address , unsigned int val ) ; extern unsigned long long atomicaddul ( void * address , unsigned long long val ) ; extern float atomicaddf ( void * address , float val ) ; extern double atomicaddd ( void * address , double val ) ; # 133 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomicsubi ( void * address , int val ) ; extern unsigned int atomicsubu ( void * address , unsigned int val ) ; extern unsigned long long atomicsubul ( void * address , unsigned long long val ) ; extern float atomicsubf ( void * address , float val ) ; extern double atomicsubd ( void * address , double val ) ; # 139 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomicmaxi ( void * address , int val ) ; extern unsigned int atomicmaxu ( void * address , unsigned int val ) ; extern unsigned long long atomicmaxul ( void * address , unsigned long long val ) ; extern float atomicmaxf ( void * address , float val ) ; extern double atomicmaxd ( void * address , double val ) ; # 145 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomicmini ( void * address , int val ) ; extern unsigned int atomicminu ( void * address , unsigned int val ) ; extern unsigned long long atomicminul ( void * address , unsigned long long val ) ; extern float atomicminf ( void * address , float val ) ; extern double atomicmind ( void * address , double val ) ; # 151 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomicandi ( void * address , int val ) ; extern unsigned int atomicandu ( void * address , unsigned int val ) ; extern unsigned long long atomicandul ( void * address , unsigned long long val ) ; # 155 
"/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomicori ( void * address , int val ) ; extern unsigned int atomicoru ( void * address , unsigned int val ) ; extern unsigned long long atomicorul ( void * address , unsigned long long val ) ; # 159 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomicxori ( void * address , int val ) ; extern unsigned int atomicxoru ( void * address , unsigned int val ) ; extern unsigned long long atomicxorul ( void * address , unsigned long long val ) ; # 163 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomicexchi ( void * address , int val ) ; extern unsigned int atomicexchu ( void * address , unsigned int val ) ; extern unsigned long long atomicexchul ( void * address , unsigned long long val ) ; extern float atomicexchf ( void * address , float val ) ; extern double atomicexchd ( void * address , double val ) ; # 169 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern unsigned int atomicincu ( void * address , unsigned int val ) ; # 171 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern unsigned int atomicdecu ( void * address , unsigned int val ) ; # 173 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int atomiccasi ( void * address , int val , int val2 ) ; extern unsigned int atomiccasu ( void * address , unsigned int val , unsigned int val2 ) ; extern unsigned long long atomiccasul ( void * address , unsigned long long val , unsigned long long val2 ) ; extern float atomiccasf ( void * address , float val , float val2 ) ; extern double atomiccasd ( void * address , double val , double val2 ) ; # 179 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> extern int __pgi_gangidx ( void ) ; extern int __pgi_workeridx ( void ) ; extern int __pgi_vectoridx ( void ) ; extern int __pgi_blockidx ( int ) ; extern int __pgi_threadidx ( int ) ; # 6 "main.c" # 6 "main.c" # 1 "/usr/include/stdio.h" 
<System_Header> # 17 "/usr/include/stdio.h" <System_Header> # 21 "/usr/include/stdio.h" <System_Header> # 27 "/usr/include/stdio.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 28 "/usr/include/stdio.h" <System_Header> # 33 "/usr/include/stdio.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 17 
"/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 34 "/usr/include/stdio.h" <System_Header> # 35 "/usr/include/stdio.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 36 "/usr/include/stdio.h" <System_Header> # 43 "/usr/include/stdio.h" <System_Header> struct _IO_FILE ; # 47 "/usr/include/stdio.h" <System_Header> typedef struct _IO_FILE FILE ; # 63 "/usr/include/stdio.h" <System_Header> typedef struct _IO_FILE __FILE ; # 74 "/usr/include/stdio.h" <System_Header> # 1 "/usr/include/libio.h" <System_Header> # 26 "/usr/include/libio.h" <System_Header> # 31 "/usr/include/libio.h" <System_Header> # 1 "/usr/include/_G_config.h" <System_Header> # 2 "/usr/include/_G_config.h" <System_Header> # 7 "/usr/include/_G_config.h" <System_Header> # 9 "/usr/include/_G_config.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 10 "/usr/include/_G_config.h" <System_Header> # 15 "/usr/include/_G_config.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 54 
"/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/usr/include/_G_config.h" <System_Header> # 20 "/usr/include/_G_config.h" <System_Header> # 1 "/usr/include/wchar.h" <System_Header> # 16 "/usr/include/wchar.h" <System_Header> # 21 "/usr/include/wchar.h" <System_Header> # 51 "/usr/include/wchar.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" 
<System_Header> # 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 357 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> typedef unsigned int wint_t ; # 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 52 "/usr/include/wchar.h" <System_Header> # 54 "/usr/include/wchar.h" <System_Header> # 64 "/usr/include/wchar.h" <System_Header> # 73 "/usr/include/wchar.h" <System_Header> # 81 "/usr/include/wchar.h" <System_Header> typedef struct { int __count ; union { # 88 "/usr/include/wchar.h" <System_Header> unsigned int __wch ; # 92 "/usr/include/wchar.h" <System_Header> char __wchb [ 4 ] ; } __value ; } __mbstate_t ; # 100 "/usr/include/wchar.h" <System_Header> # 901 "/usr/include/wchar.h" <System_Header> # 21 "/usr/include/_G_config.h" <System_Header> # 21 "/usr/include/_G_config.h" <System_Header> typedef struct { __off_t __pos ; __mbstate_t __state ; } _G_fpos_t ; typedef struct { __off64_t __pos ; __mbstate_t __state ; } _G_fpos64_t ; # 45 "/usr/include/_G_config.h" <System_Header> # 53 "/usr/include/_G_config.h" <System_Header> # 32 
"/usr/include/libio.h" <System_Header> # 32 "/usr/include/libio.h" <System_Header> # 47 "/usr/include/libio.h" <System_Header> # 49 "/usr/include/libio.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 24 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header> # 31 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header> # 34 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header> typedef struct __pgi_tag { unsigned int gp_offset ; unsigned int fp_offset ; char * overflow_arg_area ; char * reg_save_area ; } __pgi_va_list [ 1 ] ; # 49 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header> typedef __pgi_va_list va_list ; # 60 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header> typedef __pgi_va_list __gnuc_va_list ; # 25 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 31 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> extern void * __builtin_va_arg ( ) ; extern int __builtin_va_start ( ) ; # 60 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 50 "/usr/include/libio.h" <System_Header> # 90 "/usr/include/libio.h" <System_Header> # 124 "/usr/include/libio.h" <System_Header> # 144 "/usr/include/libio.h" <System_Header> struct _IO_jump_t ; struct _IO_FILE ; # 146 "/usr/include/libio.h" <System_Header> # 150 "/usr/include/libio.h" <System_Header> typedef void _IO_lock_t ; # 154 "/usr/include/libio.h" <System_Header> # 156 "/usr/include/libio.h" <System_Header> struct _IO_marker { struct _IO_marker * _next ; struct _IO_FILE * _sbuf ; int _pos ; # 173 "/usr/include/libio.h" <System_Header> } ; # 175 "/usr/include/libio.h" <System_Header> 
enum __codecvt_result { __codecvt_ok , __codecvt_partial , __codecvt_error , __codecvt_noconv } ; # 241 "/usr/include/libio.h" <System_Header> struct _IO_FILE { int _flags ; # 245 "/usr/include/libio.h" <System_Header> char * _IO_read_ptr ; char * _IO_read_end ; char * _IO_read_base ; char * _IO_write_base ; char * _IO_write_ptr ; char * _IO_write_end ; char * _IO_buf_base ; char * _IO_buf_end ; char * _IO_save_base ; char * _IO_backup_base ; char * _IO_save_end ; # 260 "/usr/include/libio.h" <System_Header> struct _IO_marker * _markers ; # 262 "/usr/include/libio.h" <System_Header> struct _IO_FILE * _chain ; # 264 "/usr/include/libio.h" <System_Header> int _fileno ; # 268 "/usr/include/libio.h" <System_Header> int _flags2 ; # 270 "/usr/include/libio.h" <System_Header> __off_t _old_offset ; # 273 "/usr/include/libio.h" <System_Header> unsigned short _cur_column ; signed char _vtable_offset ; char _shortbuf [ 1 ] ; # 278 "/usr/include/libio.h" <System_Header> # 280 "/usr/include/libio.h" <System_Header> _IO_lock_t * _lock ; # 289 "/usr/include/libio.h" <System_Header> __off64_t _offset ; # 297 "/usr/include/libio.h" <System_Header> void * __pad1 ; void * __pad2 ; void * __pad3 ; void * __pad4 ; # 302 "/usr/include/libio.h" <System_Header> size_t __pad5 ; int _mode ; char _unused2 [ 15 * sizeof ( int ) - 4 * sizeof ( void * ) - sizeof ( size_t ) ] ; # 307 "/usr/include/libio.h" <System_Header> } ; # 310 "/usr/include/libio.h" <System_Header> typedef struct _IO_FILE _IO_FILE ; # 313 "/usr/include/libio.h" <System_Header> struct _IO_FILE_plus ; # 315 "/usr/include/libio.h" <System_Header> extern struct _IO_FILE_plus _IO_2_1_stdin_ ; extern struct _IO_FILE_plus _IO_2_1_stdout_ ; extern struct _IO_FILE_plus _IO_2_1_stderr_ ; # 329 "/usr/include/libio.h" <System_Header> # 332 "/usr/include/libio.h" <System_Header> typedef __ssize_t __io_read_fn ( void * __cookie , char * __buf , size_t __nbytes ) ; # 340 "/usr/include/libio.h" <System_Header> typedef __ssize_t 
__io_write_fn ( void * __cookie , const char * __buf , size_t __n ) ; # 349 "/usr/include/libio.h" <System_Header> typedef int __io_seek_fn ( void * __cookie , __off64_t * __pos , int __w ) ; # 352 "/usr/include/libio.h" <System_Header> typedef int __io_close_fn ( void * __cookie ) ; # 385 "/usr/include/libio.h" <System_Header> extern int __underflow ( _IO_FILE * ) ; extern int __uflow ( _IO_FILE * ) ; extern int __overflow ( _IO_FILE * , int ) ; # 429 "/usr/include/libio.h" <System_Header> extern int _IO_getc ( _IO_FILE * __fp ) ; extern int _IO_putc ( int __c , _IO_FILE * __fp ) ; extern int _IO_feof ( _IO_FILE * __fp ) ; extern int _IO_ferror ( _IO_FILE * __fp ) ; # 434 "/usr/include/libio.h" <System_Header> extern int _IO_peekc_locked ( _IO_FILE * __fp ) ; # 436 "/usr/include/libio.h" <System_Header> # 440 "/usr/include/libio.h" <System_Header> extern void _IO_flockfile ( _IO_FILE * ) ; extern void _IO_funlockfile ( _IO_FILE * ) ; extern int _IO_ftrylockfile ( _IO_FILE * ) ; # 459 "/usr/include/libio.h" <System_Header> extern int _IO_vfscanf ( _IO_FILE * __restrict , const char * __restrict , __gnuc_va_list , int * __restrict ) ; extern int _IO_vfprintf ( _IO_FILE * __restrict , const char * __restrict , __gnuc_va_list ) ; extern __ssize_t _IO_padn ( _IO_FILE * , int , __ssize_t ) ; extern size_t _IO_sgetn ( _IO_FILE * , void * , size_t ) ; # 466 "/usr/include/libio.h" <System_Header> extern __off64_t _IO_seekoff ( _IO_FILE * , __off64_t , int , int ) ; extern __off64_t _IO_seekpos ( _IO_FILE * , __off64_t , int ) ; # 469 "/usr/include/libio.h" <System_Header> extern void _IO_free_backup_area ( _IO_FILE * ) ; # 75 "/usr/include/stdio.h" <System_Header> # 83 "/usr/include/stdio.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header> # 84 "/usr/include/stdio.h" <System_Header> # 107 "/usr/include/stdio.h" 
<System_Header> # 110 "/usr/include/stdio.h" <System_Header> typedef _G_fpos_t fpos_t ; # 119 "/usr/include/stdio.h" <System_Header> # 125 "/usr/include/stdio.h" <System_Header> # 132 "/usr/include/stdio.h" <System_Header> # 139 "/usr/include/stdio.h" <System_Header> # 150 "/usr/include/stdio.h" <System_Header> # 163 "/usr/include/stdio.h" <System_Header> # 164 "/usr/include/stdio.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" <System_Header> # 165 "/usr/include/stdio.h" <System_Header> # 167 "/usr/include/stdio.h" <System_Header> extern struct _IO_FILE * stdin ; extern struct _IO_FILE * stdout ; extern struct _IO_FILE * stderr ; # 177 "/usr/include/stdio.h" <System_Header> extern int remove ( const char * __filename ) ; extern int rename ( const char * __old , const char * __new ) ; # 184 "/usr/include/stdio.h" <System_Header> extern int renameat ( int __oldfd , const char * __old , int __newfd , const char * __new ) ; # 193 "/usr/include/stdio.h" <System_Header> # 195 "/usr/include/stdio.h" <System_Header> extern FILE * tmpfile ( void ) ; # 208 "/usr/include/stdio.h" <System_Header> extern char * tmpnam ( char * __s ) ; # 214 "/usr/include/stdio.h" <System_Header> extern char * tmpnam_r ( char * __s ) ; # 226 "/usr/include/stdio.h" <System_Header> extern char * tempnam ( const char * __dir , const char * __pfx ) ; # 236 "/usr/include/stdio.h" <System_Header> extern int fclose ( FILE * __stream ) ; # 241 "/usr/include/stdio.h" <System_Header> extern int fflush ( FILE * __stream ) ; # 251 "/usr/include/stdio.h" <System_Header> extern int fflush_unlocked ( FILE * __stream ) ; # 271 "/usr/include/stdio.h" <System_Header> extern FILE * fopen ( const char * __restrict __filename , const char * __restrict __modes ) ; # 277 "/usr/include/stdio.h" <System_Header> extern FILE * freopen ( const char * __restrict __filename , const char * __restrict __modes , FILE * __restrict 
__stream ) ; # 305 "/usr/include/stdio.h" <System_Header> extern FILE * fdopen ( int __fd , const char * __modes ) ; # 318 "/usr/include/stdio.h" <System_Header> extern FILE * fmemopen ( void * __s , size_t __len , const char * __modes ) ; # 324 "/usr/include/stdio.h" <System_Header> extern FILE * open_memstream ( char * * __bufloc , size_t * __sizeloc ) ; # 331 "/usr/include/stdio.h" <System_Header> extern void setbuf ( FILE * __restrict __stream , char * __restrict __buf ) ; # 335 "/usr/include/stdio.h" <System_Header> extern int setvbuf ( FILE * __restrict __stream , char * __restrict __buf , int __modes , size_t __n ) ; # 342 "/usr/include/stdio.h" <System_Header> extern void setbuffer ( FILE * __restrict __stream , char * __restrict __buf , size_t __size ) ; # 346 "/usr/include/stdio.h" <System_Header> extern void setlinebuf ( FILE * __stream ) ; # 355 "/usr/include/stdio.h" <System_Header> extern int fprintf ( FILE * __restrict __stream , const char * __restrict __format , ... ) ; # 361 "/usr/include/stdio.h" <System_Header> extern int printf ( const char * __restrict __format , ... ) ; extern int sprintf ( char * __restrict __s , const char * __restrict __format , ... ) ; # 370 "/usr/include/stdio.h" <System_Header> extern int vfprintf ( FILE * __restrict __s , const char * __restrict __format , __gnuc_va_list __arg ) ; # 376 "/usr/include/stdio.h" <System_Header> extern int vprintf ( const char * __restrict __format , __gnuc_va_list __arg ) ; extern int vsprintf ( char * __restrict __s , const char * __restrict __format , __gnuc_va_list __arg ) ; # 385 "/usr/include/stdio.h" <System_Header> extern int snprintf ( char * __restrict __s , size_t __maxlen , const char * __restrict __format , ... 
) __attribute__ ( ( __format__ ( __printf__ , 3 , 4 ) ) ) ; # 390 "/usr/include/stdio.h" <System_Header> extern int vsnprintf ( char * __restrict __s , size_t __maxlen , const char * __restrict __format , __gnuc_va_list __arg ) __attribute__ ( ( __format__ ( __printf__ , 3 , 0 ) ) ) ; # 411 "/usr/include/stdio.h" <System_Header> extern int vdprintf ( int __fd , const char * __restrict __fmt , __gnuc_va_list __arg ) __attribute__ ( ( __format__ ( __printf__ , 2 , 0 ) ) ) ; extern int dprintf ( int __fd , const char * __restrict __fmt , ... ) __attribute__ ( ( __format__ ( __printf__ , 2 , 3 ) ) ) ; # 424 "/usr/include/stdio.h" <System_Header> extern int fscanf ( FILE * __restrict __stream , const char * __restrict __format , ... ) ; # 430 "/usr/include/stdio.h" <System_Header> extern int scanf ( const char * __restrict __format , ... ) ; extern int sscanf ( const char * __restrict __s , const char * __restrict __format , ... ) ; # 452 "/usr/include/stdio.h" <System_Header> extern int __isoc99_fscanf ( FILE * __restrict __stream , const char * __restrict __format , ... ) ; extern int __isoc99_scanf ( const char * __restrict __format , ... ) ; extern int __isoc99_sscanf ( const char * __restrict __s , const char * __restrict __format , ... 
) ; # 470 "/usr/include/stdio.h" <System_Header> extern int vfscanf ( FILE * __restrict __s , const char * __restrict __format , __gnuc_va_list __arg ) __attribute__ ( ( __format__ ( __scanf__ , 2 , 0 ) ) ) ; # 478 "/usr/include/stdio.h" <System_Header> extern int vscanf ( const char * __restrict __format , __gnuc_va_list __arg ) __attribute__ ( ( __format__ ( __scanf__ , 1 , 0 ) ) ) ; # 482 "/usr/include/stdio.h" <System_Header> extern int vsscanf ( const char * __restrict __s , const char * __restrict __format , __gnuc_va_list __arg ) __attribute__ ( ( __format__ ( __scanf__ , 2 , 0 ) ) ) ; # 508 "/usr/include/stdio.h" <System_Header> extern int __isoc99_vfscanf ( FILE * __restrict __s , const char * __restrict __format , __gnuc_va_list __arg ) ; extern int __isoc99_vscanf ( const char * __restrict __format , __gnuc_va_list __arg ) ; extern int __isoc99_vsscanf ( const char * __restrict __s , const char * __restrict __format , __gnuc_va_list __arg ) ; # 530 "/usr/include/stdio.h" <System_Header> extern int fgetc ( FILE * __stream ) ; extern int getc ( FILE * __stream ) ; # 537 "/usr/include/stdio.h" <System_Header> extern int getchar ( void ) ; # 542 "/usr/include/stdio.h" <System_Header> # 549 "/usr/include/stdio.h" <System_Header> extern int getc_unlocked ( FILE * __stream ) ; extern int getchar_unlocked ( void ) ; # 560 "/usr/include/stdio.h" <System_Header> extern int fgetc_unlocked ( FILE * __stream ) ; # 572 "/usr/include/stdio.h" <System_Header> extern int fputc ( int __c , FILE * __stream ) ; extern int putc ( int __c , FILE * __stream ) ; # 579 "/usr/include/stdio.h" <System_Header> extern int putchar ( int __c ) ; # 584 "/usr/include/stdio.h" <System_Header> # 593 "/usr/include/stdio.h" <System_Header> extern int fputc_unlocked ( int __c , FILE * __stream ) ; # 601 "/usr/include/stdio.h" <System_Header> extern int putc_unlocked ( int __c , FILE * __stream ) ; extern int putchar_unlocked ( int __c ) ; # 609 "/usr/include/stdio.h" <System_Header> extern 
int getw ( FILE * __stream ) ; # 612 "/usr/include/stdio.h" <System_Header> extern int putw ( int __w , FILE * __stream ) ; # 621 "/usr/include/stdio.h" <System_Header> extern char * fgets ( char * __restrict __s , int __n , FILE * __restrict __stream ) ; # 637 "/usr/include/stdio.h" <System_Header> extern char * gets ( char * __s ) ; # 664 "/usr/include/stdio.h" <System_Header> extern __ssize_t __getdelim ( char * * __restrict __lineptr , size_t * __restrict __n , int __delimiter , FILE * __restrict __stream ) ; extern __ssize_t getdelim ( char * * __restrict __lineptr , size_t * __restrict __n , int __delimiter , FILE * __restrict __stream ) ; # 677 "/usr/include/stdio.h" <System_Header> extern __ssize_t getline ( char * * __restrict __lineptr , size_t * __restrict __n , FILE * __restrict __stream ) ; # 688 "/usr/include/stdio.h" <System_Header> extern int fputs ( const char * __restrict __s , FILE * __restrict __stream ) ; # 694 "/usr/include/stdio.h" <System_Header> extern int puts ( const char * __s ) ; # 701 "/usr/include/stdio.h" <System_Header> extern int ungetc ( int __c , FILE * __stream ) ; # 708 "/usr/include/stdio.h" <System_Header> extern size_t fread ( void * __restrict __ptr , size_t __size , size_t __n , FILE * __restrict __stream ) ; # 714 "/usr/include/stdio.h" <System_Header> extern size_t fwrite ( const void * __restrict __ptr , size_t __size , size_t __n , FILE * __restrict __s ) ; # 736 "/usr/include/stdio.h" <System_Header> extern size_t fread_unlocked ( void * __restrict __ptr , size_t __size , size_t __n , FILE * __restrict __stream ) ; extern size_t fwrite_unlocked ( const void * __restrict __ptr , size_t __size , size_t __n , FILE * __restrict __stream ) ; # 748 "/usr/include/stdio.h" <System_Header> extern int fseek ( FILE * __stream , long int __off , int __whence ) ; # 753 "/usr/include/stdio.h" <System_Header> extern long int ftell ( FILE * __stream ) ; # 758 "/usr/include/stdio.h" <System_Header> extern void rewind ( FILE * __stream 
) ; # 765 "/usr/include/stdio.h" <System_Header> # 772 "/usr/include/stdio.h" <System_Header> extern int fseeko ( FILE * __stream , __off_t __off , int __whence ) ; # 777 "/usr/include/stdio.h" <System_Header> extern __off_t ftello ( FILE * __stream ) ; # 797 "/usr/include/stdio.h" <System_Header> extern int fgetpos ( FILE * __restrict __stream , fpos_t * __restrict __pos ) ; # 802 "/usr/include/stdio.h" <System_Header> extern int fsetpos ( FILE * __stream , const fpos_t * __pos ) ; # 825 "/usr/include/stdio.h" <System_Header> extern void clearerr ( FILE * __stream ) ; extern int feof ( FILE * __stream ) ; extern int ferror ( FILE * __stream ) ; # 834 "/usr/include/stdio.h" <System_Header> extern void clearerr_unlocked ( FILE * __stream ) ; extern int feof_unlocked ( FILE * __stream ) ; extern int ferror_unlocked ( FILE * __stream ) ; # 845 "/usr/include/stdio.h" <System_Header> extern void perror ( const char * __s ) ; # 852 "/usr/include/stdio.h" <System_Header> # 853 "/usr/include/stdio.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header> # 23 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header> extern int sys_nerr ; extern const char * const sys_errlist [ ] ; # 854 "/usr/include/stdio.h" <System_Header> # 857 "/usr/include/stdio.h" <System_Header> extern int fileno ( FILE * __stream ) ; # 862 "/usr/include/stdio.h" <System_Header> extern int fileno_unlocked ( FILE * __stream ) ; # 871 "/usr/include/stdio.h" <System_Header> extern FILE * popen ( const char * __command , const char * __modes ) ; # 877 "/usr/include/stdio.h" <System_Header> extern int pclose ( FILE * __stream ) ; # 883 "/usr/include/stdio.h" <System_Header> extern char * ctermid ( char * __s ) ; # 909 "/usr/include/stdio.h" <System_Header> # 911 "/usr/include/stdio.h" <System_Header> extern void flockfile 
( FILE * __stream ) ; # 915 "/usr/include/stdio.h" <System_Header> extern int ftrylockfile ( FILE * __stream ) ; # 918 "/usr/include/stdio.h" <System_Header> extern void funlockfile ( FILE * __stream ) ; # 931 "/usr/include/stdio.h" <System_Header> # 7 "main.c" # 7 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * malloc_managed ( size_t ) ; extern void * calloc_managed ( size_t , size_t ) ; extern void free_managed ( void * ) ; extern void cfree_managed ( void * ) ; # 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * realloc_managed ( void * , size_t ) ; extern void * valloc_managed ( size_t ) ; extern void * pvalloc_managed ( size_t ) ; extern void * memalign_managed ( size_t , size_t ) ; extern int posix_memalign_managed ( void * * , size_t , size_t ) ; extern char * tmpnam_managed ( char * ) ; # 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 8 "main.c" # 8 "main.c" # 1 "/usr/include/string.h" <System_Header> # 16 "/usr/include/string.h" <System_Header> # 20 "/usr/include/string.h" <System_Header> # 25 "/usr/include/string.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 26 "/usr/include/string.h" <System_Header> # 29 "/usr/include/string.h" <System_Header> # 32 "/usr/include/string.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" 
<System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 33 "/usr/include/string.h" <System_Header> # 34 "/usr/include/string.h" <System_Header> # 41 "/usr/include/string.h" <System_Header> extern void * memcpy ( void * __restrict __dest , const void * __restrict __src , size_t __n ) ; # 45 "/usr/include/string.h" <System_Header> extern void * memmove ( void * __dest , 
const void * __src , size_t __n ) ; # 52 "/usr/include/string.h" <System_Header> # 54 "/usr/include/string.h" <System_Header> extern void * memccpy ( void * __restrict __dest , const void * __restrict __src , int __c , size_t __n ) ; # 61 "/usr/include/string.h" <System_Header> extern void * memset ( void * __s , int __c , size_t __n ) ; # 64 "/usr/include/string.h" <System_Header> extern int memcmp ( const void * __s1 , const void * __s2 , size_t __n ) ; # 68 "/usr/include/string.h" <System_Header> # 92 "/usr/include/string.h" <System_Header> extern void * memchr ( const void * __s , int __c , size_t __n ) ; # 124 "/usr/include/string.h" <System_Header> extern char * strcpy ( char * __restrict __dest , const char * __restrict __src ) ; extern char * strncpy ( char * __restrict __dest , const char * __restrict __src , size_t __n ) ; # 132 "/usr/include/string.h" <System_Header> extern char * strcat ( char * __restrict __dest , const char * __restrict __src ) ; extern char * strncat ( char * __restrict __dest , const char * __restrict __src , size_t __n ) ; # 139 "/usr/include/string.h" <System_Header> extern int strcmp ( const char * __s1 , const char * __s2 ) ; extern int strncmp ( const char * __s1 , const char * __s2 , size_t __n ) ; # 146 "/usr/include/string.h" <System_Header> extern int strcoll ( const char * __s1 , const char * __s2 ) ; extern size_t strxfrm ( char * __restrict __dest , const char * __restrict __src , size_t __n ) ; # 158 "/usr/include/string.h" <System_Header> # 159 "/usr/include/string.h" <System_Header> # 1 "/usr/include/xlocale.h" <System_Header> # 18 "/usr/include/xlocale.h" <System_Header> # 26 "/usr/include/xlocale.h" <System_Header> typedef struct __locale_struct { struct __locale_data * __locales [ 13 ] ; # 32 "/usr/include/xlocale.h" <System_Header> const unsigned short int * __ctype_b ; const int * __ctype_tolower ; const int * __ctype_toupper ; # 37 "/usr/include/xlocale.h" <System_Header> const char * __names [ 13 ] ; } * 
__locale_t ; # 41 "/usr/include/xlocale.h" <System_Header> typedef __locale_t locale_t ; # 160 "/usr/include/string.h" <System_Header> # 161 "/usr/include/string.h" <System_Header> extern int strcoll_l ( const char * __s1 , const char * __s2 , __locale_t __l ) ; extern size_t strxfrm_l ( char * __dest , const char * __src , size_t __n , __locale_t __l ) ; # 170 "/usr/include/string.h" <System_Header> extern char * strdup ( const char * __s ) ; # 177 "/usr/include/string.h" <System_Header> # 179 "/usr/include/string.h" <System_Header> extern char * strndup ( const char * __string , size_t __n ) ; # 207 "/usr/include/string.h" <System_Header> # 231 "/usr/include/string.h" <System_Header> extern char * strchr ( const char * __s , int __c ) ; # 234 "/usr/include/string.h" <System_Header> # 258 "/usr/include/string.h" <System_Header> extern char * strrchr ( const char * __s , int __c ) ; # 279 "/usr/include/string.h" <System_Header> extern size_t strcspn ( const char * __s , const char * __reject ) ; # 283 "/usr/include/string.h" <System_Header> extern size_t strspn ( const char * __s , const char * __accept ) ; # 310 "/usr/include/string.h" <System_Header> extern char * strpbrk ( const char * __s , const char * __accept ) ; # 313 "/usr/include/string.h" <System_Header> # 337 "/usr/include/string.h" <System_Header> extern char * strstr ( const char * __haystack , const char * __needle ) ; # 342 "/usr/include/string.h" <System_Header> extern char * strtok ( char * __restrict __s , const char * __restrict __delim ) ; # 348 "/usr/include/string.h" <System_Header> extern char * __strtok_r ( char * __restrict __s , const char * __restrict __delim , char * * __restrict __save_ptr ) ; # 354 "/usr/include/string.h" <System_Header> extern char * strtok_r ( char * __restrict __s , const char * __restrict __delim , char * * __restrict __save_ptr ) ; # 393 "/usr/include/string.h" <System_Header> extern size_t strlen ( const char * __s ) ; # 400 "/usr/include/string.h" 
<System_Header> extern size_t strnlen ( const char * __string , size_t __maxlen ) ; # 407 "/usr/include/string.h" <System_Header> extern char * strerror ( int __errnum ) ; # 417 "/usr/include/string.h" <System_Header> # 420 "/usr/include/string.h" <System_Header> # 426 "/usr/include/string.h" <System_Header> extern int __xpg_strerror_r ( int __errnum , char * __buf , size_t __buflen ) ; # 439 "/usr/include/string.h" <System_Header> extern char * strerror_l ( int __errnum , __locale_t __l ) ; # 445 "/usr/include/string.h" <System_Header> extern void __bzero ( void * __s , size_t __n ) ; # 449 "/usr/include/string.h" <System_Header> extern void bcopy ( const void * __src , void * __dest , size_t __n ) ; # 453 "/usr/include/string.h" <System_Header> extern void bzero ( void * __s , size_t __n ) ; # 456 "/usr/include/string.h" <System_Header> extern int bcmp ( const void * __s1 , const void * __s2 , size_t __n ) ; # 460 "/usr/include/string.h" <System_Header> # 484 "/usr/include/string.h" <System_Header> extern char * index ( const char * __s , int __c ) ; # 488 "/usr/include/string.h" <System_Header> # 512 "/usr/include/string.h" <System_Header> extern char * rindex ( const char * __s , int __c ) ; # 517 "/usr/include/string.h" <System_Header> extern int ffs ( int __i ) __attribute__ ( ( __const__ ) ) ; # 521 "/usr/include/string.h" <System_Header> # 528 "/usr/include/string.h" <System_Header> extern int strcasecmp ( const char * __s1 , const char * __s2 ) ; # 532 "/usr/include/string.h" <System_Header> extern int strncasecmp ( const char * __s1 , const char * __s2 , size_t __n ) ; # 551 "/usr/include/string.h" <System_Header> extern char * strsep ( char * * __restrict __stringp , const char * __restrict __delim ) ; # 558 "/usr/include/string.h" <System_Header> extern char * strsignal ( int __sig ) ; # 561 "/usr/include/string.h" <System_Header> extern char * __stpcpy ( char * __restrict __dest , const char * __restrict __src ) ; extern char * stpcpy ( char * 
__restrict __dest , const char * __restrict __src ) ; # 568 "/usr/include/string.h" <System_Header> extern char * __stpncpy ( char * __restrict __dest , const char * __restrict __src , size_t __n ) ; extern char * stpncpy ( char * __restrict __dest , const char * __restrict __src , size_t __n ) ; # 9 "main.c" # 9 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 30 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 25 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 28 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 33 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 34 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header> # 4 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header> # 7 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 30 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 25 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 28 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 1 
"/usr/include/limits.h" <System_Header> # 16 "/usr/include/limits.h" <System_Header> # 20 "/usr/include/limits.h" <System_Header> # 25 "/usr/include/limits.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 26 "/usr/include/limits.h" <System_Header> # 30 "/usr/include/limits.h" <System_Header> # 35 "/usr/include/limits.h" <System_Header> # 40 "/usr/include/limits.h" <System_Header> # 44 "/usr/include/limits.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header> # 45 "/usr/include/limits.h" <System_Header> # 47 "/usr/include/limits.h" <System_Header> # 50 "/usr/include/limits.h" <System_Header> # 52 "/usr/include/limits.h" <System_Header> # 55 "/usr/include/limits.h" <System_Header> # 59 "/usr/include/limits.h" <System_Header> # 62 "/usr/include/limits.h" <System_Header> # 71 "/usr/include/limits.h" <System_Header> # 75 "/usr/include/limits.h" <System_Header> # 78 "/usr/include/limits.h" <System_Header> # 82 "/usr/include/limits.h" <System_Header> # 85 "/usr/include/limits.h" <System_Header> # 93 "/usr/include/limits.h" <System_Header> # 102 "/usr/include/limits.h" <System_Header> # 106 "/usr/include/limits.h" <System_Header> # 116 "/usr/include/limits.h" <System_Header> # 120 "/usr/include/limits.h" <System_Header> # 128 "/usr/include/limits.h" <System_Header> # 142 "/usr/include/limits.h" <System_Header> # 143 "/usr/include/limits.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 22 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 28 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 30 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 33 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 36 
"/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 39 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 46 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 50 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 53 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 56 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 59 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 63 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 66 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 69 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 72 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 75 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 82 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 95 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 98 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 102 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 105 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 108 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 111 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 114 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 117 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 120 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 123 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 127 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 130 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 133 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 136 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 155 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 
159 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 160 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 23 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 37 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 38 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 1 "/usr/include/linux/limits.h" <System_Header> # 39 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 40 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 45 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 50 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 55 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 61 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 63 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 66 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 68 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 71 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 73 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 77 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 80 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 83 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 86 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 89 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 92 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 95 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 98 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header> # 161 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header> # 169 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" 
<System_Header> # 144 "/usr/include/limits.h" <System_Header> # 147 "/usr/include/limits.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 20 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 29 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 32 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 35 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 39 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 43 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 46 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 50 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 54 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 60 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 87 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header> # 148 "/usr/include/limits.h" <System_Header> # 169 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 31 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 8 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 57 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 66 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 71 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 77 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 85 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 102 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" 
<System_Header> # 108 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 116 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 122 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 127 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 133 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 138 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 144 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 163 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header> # 31 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header> # 10 "main.c" # 11 "main.c" # 27 "main.c" int __MACC_NUMGPUS = - 1 ; # 29 "main.c" int __macc_get_num_gpus ( ) { return acc_get_num_devices ( acc_device_nvidia ) ; } # 34 "main.c" int * __MACC_TOPOLOGY ; # 36 "main.c" void __macc_set_gpu_num ( int i ) { acc_set_device_num ( __MACC_TOPOLOGY [ i ] , acc_device_nvidia ) ; } # 44 "main.c" struct __MaccDataTableEntry { void * addr ; void * addr_ub ; int type_size ; int entire_lb ; int entire_ub ; int dirty ; int dirty_lb ; int dirty_ub ; int offset ; struct __MaccDataTableEntry * next ; } ; # 57 "main.c" struct __MaccDataTable { struct __MaccDataTableEntry * entries [ 256 ] ; } ; # 61 "main.c" struct __MaccDataTable * __MACC_DATA_TABLE_SET ; # 67 "main.c" struct __MaccDataWrapCache { void * addr [ 16 * 16 ] ; struct __MaccDataTableEntry * entry [ 16 * 16 ] ; int offset [ 16 * 16 ] ; int cachenum [ 16 ] ; } ; # 74 "main.c" struct __MaccDataWrapCache * __MACC_DATA_WRAP_CACHE_SET ; # 76 "main.c" void __macc_data_table_insert ( int gpu_num , void * ptr , int type_size , int entire_lb , int entire_ub ) { int index = ( ( ( long ) ptr / 16 ) % 256 ) ; # 81 "main.c" struct __MaccDataTableEntry * new_entry = malloc_managed ( sizeof ( struct __MaccDataTableEntry ) ) ; # 83 "main.c" new_entry -> addr = ptr ; new_entry -> 
addr_ub = ptr + entire_ub * type_size ; new_entry -> type_size = type_size ; new_entry -> entire_lb = entire_lb ; new_entry -> entire_ub = entire_ub ; new_entry -> dirty = 0 ; new_entry -> dirty_lb = - 1 ; new_entry -> dirty_ub = - 1 ; new_entry -> next = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] ; # 93 "main.c" __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] = new_entry ; } # 96 "main.c" struct __MaccDataTableEntry * __macc_data_table_find ( int gpu_num , void * ptr ) { int index = ( ( ( long ) ptr / 16 ) % 256 ) ; struct __MaccDataTableEntry * entry = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] ; # 101 "main.c" while ( entry != ( ( void * ) 0 ) ) { if ( entry -> addr == ptr ) { entry -> offset = 0 ; return entry ; } # 107 "main.c" entry = entry -> next ; } # 110 "main.c" struct __MaccDataWrapCache wrap_cache = __MACC_DATA_WRAP_CACHE_SET [ gpu_num ] ; int lane = ( ( ( long ) ptr / 16 ) % 16 ) ; # 113 "main.c" for ( int i = 0 ; i < wrap_cache . cachenum [ lane ] ; i ++ ) { if ( ptr == wrap_cache . addr [ lane * 16 + i ] ) { entry = wrap_cache . entry [ lane * 16 + i ] ; entry -> offset = wrap_cache . offset [ lane * 16 + i ] ; return entry ; } } # 121 "main.c" for ( int i = 0 ; i < 256 ; i ++ ) { entry = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ i ] ; # 124 "main.c" while ( entry != ( ( void * ) 0 ) ) { if ( entry -> addr <= ptr && ptr <= entry -> addr_ub ) { int offset = ( ptr - entry -> addr ) / entry -> type_size ; # 128 "main.c" int cachenum = wrap_cache . cachenum [ lane ] ; # 130 "main.c" if ( cachenum == 16 ) { cachenum = 0 ; } # 134 "main.c" wrap_cache . addr [ lane * 16 + cachenum ] = entry -> addr ; wrap_cache . entry [ lane * 16 + cachenum ] = entry ; wrap_cache . offset [ lane * 16 + cachenum ] = offset ; # 138 "main.c" wrap_cache . 
cachenum [ lane ] = cachenum + 1 ; # 140 "main.c" entry -> offset = offset ; return entry ; } # 144 "main.c" entry = entry -> next ; } } # 148 "main.c" fprintf ( stderr , "Error on __macc_data_table_find: Not found the item %p\n" , ptr ) ; exit ( - 1 ) ; # 151 "main.c" return ( ( void * ) 0 ) ; } # 154 "main.c" void __macc_data_table_delete ( int gpu_num , void * ptr ) { int index = ( ( ( long ) ptr / 16 ) % 256 ) ; struct __MaccDataTableEntry * entry = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] ; struct __MaccDataTableEntry * pre = ( ( void * ) 0 ) ; # 160 "main.c" memset ( __MACC_DATA_WRAP_CACHE_SET [ gpu_num ] . cachenum , 0 , 16 * sizeof ( int ) ) ; # 162 "main.c" if ( entry != ( ( void * ) 0 ) ) { if ( entry -> addr == ptr ) { __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] = entry -> next ; free_managed ( entry ) ; return ; } # 169 "main.c" pre = entry ; entry = entry -> next ; } # 173 "main.c" while ( pre != ( ( void * ) 0 ) && entry != ( ( void * ) 0 ) ) { if ( entry -> addr == ptr ) { pre -> next = entry -> next ; free_managed ( entry ) ; return ; } # 180 "main.c" pre = entry ; entry = entry -> next ; } # 184 "main.c" fprintf ( stderr , "Error on __macc_data_table_delete: Not found the item %p\n" , ptr ) ; exit ( - 1 ) ; } # 188 "main.c" void __macc_delete ( int gpu_num , void * ptr , int type_size , int lb , int length ) { acc_delete_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ; __macc_data_table_delete ( gpu_num , ptr ) ; acc_wait ( gpu_num ) ; } # 195 "main.c" void __macc_copyout ( int gpu_num , void * ptr , int type_size , int lb , int length ) { struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ; # 199 "main.c" if ( entry -> dirty ) acc_update_self_async ( ( entry -> addr + entry -> dirty_lb * entry -> type_size ) , ( ( entry -> dirty_ub - entry -> dirty_lb + 1 ) * entry -> type_size ) , gpu_num ) ; # 204 "main.c" __macc_delete ( gpu_num , ptr , type_size , lb , length ) ; } # 207 
"main.c" void __macc_copyin ( int gpu_num , void * ptr , int type_size , int lb , int length ) { acc_copyin_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ; __macc_data_table_insert ( gpu_num , ptr , type_size , lb , lb + length - 1 ) ; acc_wait ( gpu_num ) ; } # 214 "main.c" void __macc_create ( int gpu_num , void * ptr , int type_size , int lb , int length ) { acc_create_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ; __macc_data_table_insert ( gpu_num , ptr , type_size , lb , lb + length - 1 ) ; acc_wait ( gpu_num ) ; } # 221 "main.c" void * __macc_malloc ( unsigned long size ) { void * ret = malloc_managed ( size ) ; # 225 "main.c" # 225 "main.c" #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { __macc_create ( omp_get_thread_num ( ) , ret , 1 , 0 , size ) ; } # 230 "main.c" return ret ; } # 233 "main.c" void __macc_free ( void * ptr ) { # 235 "main.c" #pragma omp parallel num_threads ( __MACC_NUMGPUS ) { int gpu_num = omp_get_thread_num ( ) ; struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ; __macc_delete ( gpu_num , ptr , 1 , 0 , entry -> entire_ub + 1 ) ; } free_managed ( ptr ) ; } # 245 "main.c" void __macc_update_self ( int gpu_num , void * ptr , int type_size , int lb , int length ) { struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ; ptr = entry -> addr ; lb += entry -> offset ; int ub = lb + length - 1 ; # 252 "main.c" if ( entry -> dirty && ( ! ( entry -> dirty_lb > ub || entry -> dirty_ub < lb ) ) ) { int new_lb = ( ( ( entry -> dirty_lb ) > ( lb ) ) ? ( entry -> dirty_lb ) : ( lb ) ) ; int new_ub = ( ( ( entry -> dirty_ub ) < ( ub ) ) ? 
( entry -> dirty_ub ) : ( ub ) ) ; acc_update_self ( ( ptr + new_lb * type_size ) , ( ( new_ub - new_lb + 1 ) * type_size ) ) ; } } # 259 "main.c" void __macc_update_device ( int gpu_num , void * ptr , int type_size , int lb , int length ) { acc_update_device ( ( ptr + lb * type_size ) , length * type_size ) ; } # 264 "main.c" void __macc_init_access_region ( int gpu_num , int * lb_set , int * ub_set ) { lb_set [ gpu_num ] = 2147483647 ; ub_set [ gpu_num ] = - 1 ; } # 270 "main.c" void __macc_update_access_region ( int gpu_num , int * lb_set , int * ub_set , int val ) { lb_set [ gpu_num ] = ( ( ( lb_set [ gpu_num ] ) < ( val ) ) ? ( lb_set [ gpu_num ] ) : ( val ) ) ; ub_set [ gpu_num ] = ( ( ( ub_set [ gpu_num ] ) > ( val ) ) ? ( ub_set [ gpu_num ] ) : ( val ) ) ; } # 276 "main.c" int __macc_region_is_overlapping ( int * lb_set , int * ub_set ) { for ( int i = 0 ; i < __MACC_NUMGPUS - 1 ; i ++ ) for ( int j = i + 1 ; j < __MACC_NUMGPUS ; j ++ ) if ( ( ! ( lb_set [ i ] > ub_set [ j ] || ub_set [ i ] < lb_set [ j ] ) ) ) return 1 ; # 283 "main.c" return 0 ; } # 286 "main.c" void __macc_calc_loop_region ( int * loop_lb_set , int * loop_ub_set , int entire_start , int entire_end , int step , int until_equal ) { int tmp = entire_start + step * ( ( entire_end - entire_start ) / step ) ; entire_end = tmp - ( ( until_equal || entire_end != tmp ) ? 0 : step ) ; # 294 "main.c" int len = entire_end - entire_start + step ; int width = ( int ) ( ( float ) len / __MACC_NUMGPUS ) ; width -= width % step ; int rem = ( len - width * __MACC_NUMGPUS ) / step ; width -= step ; # 300 "main.c" int pos = entire_start ; # 302 "main.c" for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) { loop_lb_set [ i ] = pos ; pos = ( width < 0 ) ? pos : ( ( ( pos + width + ( ( i < rem ) ? step : 0 ) ) < ( entire_end ) ) ? ( pos + width + ( ( i < rem ) ? step : 0 ) ) : ( entire_end ) ) ; loop_ub_set [ i ] = pos ; pos = ( ( ( pos + step ) < ( entire_end ) ) ? 
( pos + step ) : ( entire_end ) ) ; } } # 310 "main.c" void __macc_adjust_data_region ( void * ptr , int gpu_num , int * lb_set , int * ub_set ) { struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ; # 314 "main.c" lb_set [ gpu_num ] += entry -> offset ; ub_set [ gpu_num ] += entry -> offset ; } # 318 "main.c" void __macc_rewrite_loop_region_into_single ( int * loop_lb_set , int * loop_ub_set ) { loop_ub_set [ 0 ] = loop_ub_set [ __MACC_NUMGPUS - 1 ] ; # 322 "main.c" for ( int i = 1 ; i < __MACC_NUMGPUS ; i ++ ) { loop_lb_set [ i ] = 1 ; loop_ub_set [ i ] = 0 ; } } # 328 "main.c" void __macc_rewrite_data_region_into_single ( int * lb_set , int * ub_set ) { int gpu_ub = __MACC_NUMGPUS - 1 ; lb_set [ 0 ] = ( ( ( lb_set [ 0 ] ) < ( lb_set [ gpu_ub ] ) ) ? ( lb_set [ 0 ] ) : ( lb_set [ gpu_ub ] ) ) ; ub_set [ 0 ] = ( ( ( ub_set [ 0 ] ) > ( ub_set [ gpu_ub ] ) ) ? ( ub_set [ 0 ] ) : ( ub_set [ gpu_ub ] ) ) ; } # 335 "main.c" void __macc_sync_data ( int gpu_num , void * ptr , int type_size , int lb , int ub ) { void * update_addr = ( ptr + lb * type_size ) ; size_t length_b = ( ( ub - lb + 1 ) * type_size ) ; # 340 "main.c" acc_update_self ( update_addr , length_b ) ; # 342 "main.c" for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) { if ( i != gpu_num ) { __macc_set_gpu_num ( i ) ; acc_update_device ( update_addr , length_b ) ; } } # 352 "main.c" __macc_set_gpu_num ( gpu_num ) ; } # 355 "main.c" void __macc_set_data_region ( int gpu_num , void * ptr , int multi , int use_type , int * use_lb_set , int * use_ub_set , int def_type , int * def_lb_set , int * def_ub_set ) { struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ; ptr = entry -> addr ; # 363 "main.c" if ( entry -> dirty && ( multi || gpu_num != 0 ) && __MACC_NUMGPUS > 1 ) { int update_all = 0 ; int update_all_DtoH = 0 ; # 370 "main.c" if ( use_type == 0 || def_type == 0 ) update_all = 1 ; # 373 "main.c" else if ( def_type == 2 ) { for ( int i = 0 ; i < 
__MACC_NUMGPUS ; i ++ ) { if ( i != gpu_num && ( ! ( entry -> dirty_lb > def_ub_set [ i ] || entry -> dirty_ub < def_lb_set [ i ] ) ) ) { # 378 "main.c" update_all = 1 ; break ; } } } # 384 "main.c" if ( ! update_all ) { int every_whole = 1 ; int unused_lb = entry -> dirty_lb ; int unused_ub = entry -> dirty_ub ; # 389 "main.c" for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) { if ( i != gpu_num ) { if ( ( use_lb_set [ i ] <= entry -> dirty_lb && entry -> dirty_ub <= use_ub_set [ i ] ) ) { # 393 "main.c" update_all_DtoH = 1 ; } else { every_whole = 0 ; # 398 "main.c" if ( use_lb_set [ i ] <= unused_lb ) unused_lb = ( ( ( unused_lb ) > ( use_ub_set [ i ] + 1 ) ) ? ( unused_lb ) : ( use_ub_set [ i ] + 1 ) ) ; else if ( use_ub_set [ i ] >= unused_ub ) unused_ub = ( ( ( unused_ub ) < ( use_lb_set [ i ] - 1 ) ) ? ( unused_ub ) : ( use_lb_set [ i ] - 1 ) ) ; } } } # 406 "main.c" if ( every_whole ) update_all = 1 ; if ( unused_ub < unused_lb ) update_all_DtoH = 1 ; } # 412 "main.c" if ( update_all ) { __macc_sync_data ( gpu_num , ptr , entry -> type_size , entry -> dirty_lb , entry -> dirty_ub ) ; entry -> dirty = 0 ; } # 418 "main.c" else if ( entry -> dirty && use_type == 2 ) { int thread_num = multi ? __MACC_NUMGPUS : 1 ; # 422 "main.c" if ( update_all_DtoH ) acc_update_self ( ( ptr + entry -> dirty_lb * entry -> type_size ) , ( ( entry -> dirty_ub - entry -> dirty_lb + 1 ) * entry -> type_size ) ) ; # 426 "main.c" for ( int i = 0 ; i < thread_num ; i ++ ) { # 431 "main.c" if ( i != gpu_num && ( ! ( entry -> dirty_lb > use_ub_set [ i ] || entry -> dirty_ub < use_lb_set [ i ] ) ) ) { # 435 "main.c" int update_lb = ( ( ( entry -> dirty_lb ) > ( use_lb_set [ i ] ) ) ? ( entry -> dirty_lb ) : ( use_lb_set [ i ] ) ) ; int update_ub = ( ( ( entry -> dirty_ub ) < ( use_ub_set [ i ] ) ) ? 
( entry -> dirty_ub ) : ( use_ub_set [ i ] ) ) ; void * update_addr = ( ptr + update_lb * entry -> type_size ) ; size_t length_b = ( ( update_ub - update_lb + 1 ) * entry -> type_size ) ; # 440 "main.c" if ( ! update_all_DtoH ) { __macc_set_gpu_num ( gpu_num ) ; acc_update_self ( update_addr , length_b ) ; } __macc_set_gpu_num ( i ) ; acc_update_device ( update_addr , length_b ) ; } } __macc_set_gpu_num ( gpu_num ) ; } } # 453 "main.c" if ( ( multi || gpu_num == 0 ) && def_type != 1 ) { if ( def_type == 0 ) { entry -> dirty = 1 ; entry -> dirty_lb = entry -> entire_lb ; entry -> dirty_ub = entry -> entire_ub ; } # 465 "main.c" else if ( ! ( entry -> dirty ) ) { entry -> dirty = 1 ; entry -> dirty_lb = def_lb_set [ gpu_num ] ; entry -> dirty_ub = def_ub_set [ gpu_num ] ; } # 471 "main.c" else if ( ( ! ( entry -> dirty_lb > def_ub_set [ gpu_num ] || entry -> dirty_ub < def_lb_set [ gpu_num ] ) ) || # 477 "main.c" entry -> dirty_lb == def_ub_set [ gpu_num ] + 1 || def_lb_set [ gpu_num ] == entry -> dirty_ub + 1 ) { entry -> dirty_lb = ( ( ( entry -> dirty_lb ) < ( def_lb_set [ gpu_num ] ) ) ? ( entry -> dirty_lb ) : ( def_lb_set [ gpu_num ] ) ) ; entry -> dirty_ub = ( ( ( entry -> dirty_ub ) > ( def_ub_set [ gpu_num ] ) ) ? 
( entry -> dirty_ub ) : ( def_ub_set [ gpu_num ] ) ) ; } # 485 "main.c" else { __macc_sync_data ( gpu_num , ptr , entry -> type_size , entry -> dirty_lb , entry -> dirty_ub ) ; entry -> dirty_lb = def_lb_set [ gpu_num ] ; entry -> dirty_ub = def_ub_set [ gpu_num ] ; } } } # 493 "main.c" void __macc_set_data_region_multi ( int gpu_num , int multi , int len , void * * ptrs , int * use_type , int * * use_lb_set , int * * use_ub_set , int * def_type , int * * def_lb_set , int * * def_ub_set ) { for ( int i = 0 ; i < len ; i ++ ) { int tnum = i ; # 504 "main.c" __macc_set_gpu_num ( gpu_num ) ; # 506 "main.c" __macc_set_data_region ( gpu_num , ptrs [ tnum ] , multi , use_type [ tnum ] , use_lb_set [ tnum ] , use_ub_set [ tnum ] , def_type [ tnum ] , def_lb_set [ tnum ] , def_ub_set [ tnum ] ) ; } } # 513 "main.c" void __macc_init ( ) { char * env_macc_numgpus = getenv ( "MACC_NUMGPUS" ) ; # 517 "main.c" if ( env_macc_numgpus != ( ( void * ) 0 ) ) { __MACC_NUMGPUS = atoi ( env_macc_numgpus ) ; } else { __MACC_NUMGPUS = __macc_get_num_gpus ( ) ; } # 524 "main.c" if ( __MACC_NUMGPUS <= 0 ) { fputs ( "[MACC ERROR] No GPU device found." 
, stderr ) ; exit ( - 1 ) ; } # 529 "main.c" __MACC_TOPOLOGY = malloc_managed ( __MACC_NUMGPUS * sizeof ( int ) ) ; char * topo = getenv ( "MACC_TOPOLOGY" ) ; # 532 "main.c" if ( topo != ( ( void * ) 0 ) ) { int i = 0 ; topo = strtok ( topo , "," ) ; while ( topo != ( ( void * ) 0 ) && i < __MACC_NUMGPUS ) { __MACC_TOPOLOGY [ i ] = atoi ( topo ) ; topo = strtok ( ( ( void * ) 0 ) , "," ) ; i ++ ; } } else { for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) __MACC_TOPOLOGY [ i ] = i ; } # 545 "main.c" # 558 "main.c" __MACC_DATA_TABLE_SET = calloc_managed ( __MACC_NUMGPUS , sizeof ( struct __MaccDataTable ) ) ; __MACC_DATA_WRAP_CACHE_SET = calloc_managed ( __MACC_NUMGPUS , sizeof ( struct __MaccDataWrapCache ) ) ; # 561 "main.c" for ( int t = 0 ; t < 10 ; t ++ ) { printf ( "[MACC] Wake up (%d)\n" , t ) ; # 565 "main.c" int n = 256 * 1024 * 1024 ; int * tmp = malloc_managed ( n * sizeof ( int ) ) ; # 568 "main.c" # 568 "main.c" #pragma acc data copy ( tmp [ 0 : n ] ) { # 570 "main.c" #pragma acc parallel loop num_gangs ( 512 ) vector_length ( 1024 ) gang vector # 572 "main.c" for ( int i = 1 ; i < n ; i ++ ) tmp [ i ] = i ; # 575 "main.c" # 575 "main.c" #pragma acc parallel loop num_gangs ( 512 ) vector_length ( 1024 ) gang vector # 577 "main.c" for ( int i = 1 ; i < n ; i ++ ) tmp [ n - i ] += i ; } # 581 "main.c" free_managed ( tmp ) ; } } # 598 "main.c" # 601 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * malloc_managed ( size_t ) ; extern void * 
calloc_managed ( size_t , size_t ) ; extern void free_managed ( void * ) ; extern void cfree_managed ( void * ) ; # 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * realloc_managed ( void * , size_t ) ; extern void * valloc_managed ( size_t ) ; extern void * pvalloc_managed ( size_t ) ; extern void * memalign_managed ( size_t , size_t ) ; extern int posix_memalign_managed ( void * * , size_t , size_t ) ; extern char * tmpnam_managed ( char * ) ; # 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 602 "main.c"
# 602 "main.c"
# 1 "/usr/include/stdio.h" <System_Header> # 17 "/usr/include/stdio.h" <System_Header> # 21 "/usr/include/stdio.h" <System_Header>
# 603 "main.c"
# 604 "main.c"
/* Prints the standard NPB results banner: benchmark name, problem class,
   problem size (linear n1*n2 when n3 == 0, else n1 x n2 x n3), iteration
   count, wall time, Mop/s, operation type, verification status
   (negative passed_verification => not performed, nonzero => success,
   zero => failure), version, and the build configuration strings,
   followed by the SNU contact footer.  Output goes to stdout only. */
void c_print_results ( char * name , char class , int n1 , int n2 , int n3 , int niter , double t , double mops , char * optype , int passed_verification , char * npbversion , char * compiletime , char * cc , char * clink , char * c_lib , char * c_inc , char * cflags , char * clinkflags ) { printf ( "\n\n %s Benchmark Completed\n" , name ) ;
# 625 "main.c"
printf ( " Class = %c\n" , class ) ;
# 627 "main.c"
if ( n3 == 0 ) { long nn = n1 ; if ( n2 != 0 ) nn *= n2 ; printf ( " Size = %12ld\n" , nn ) ; } else printf ( " Size = %4dx%4dx%4d\n" , n1 , n2 , n3 ) ;
# 635 "main.c"
printf ( " Iterations = %12d\n" , niter ) ; printf ( " Time in seconds = %12.2f\n" , t ) ;
# 639 "main.c"
printf ( " Mop/s total = %12.2f\n" , mops ) ;
# 641 "main.c"
printf ( " Operation type = %24s\n" , optype ) ;
# 643 "main.c"
if ( passed_verification < 0 ) printf ( " Verification = NOT PERFORMED\n" ) ; else if ( passed_verification ) printf ( " Verification = SUCCESSFUL\n" ) ; else printf ( " Verification = UNSUCCESSFUL\n" ) ;
# 650 "main.c"
printf ( " Version = %12s\n" , npbversion ) ;
# 652 "main.c"
printf ( " Compile date = %12s\n" , compiletime ) ;
# 654 "main.c"
printf ( "\n Compile options:\n" ) ;
# 656 "main.c"
printf ( " CC = %s\n" , cc ) ;
# 658 "main.c"
printf ( " CLINK = %s\n" , clink ) ;
# 660 "main.c"
printf ( " C_LIB = %s\n" , c_lib ) ;
# 662 "main.c"
printf ( " C_INC = %s\n" , c_inc ) ;
# 664 "main.c"
printf ( " CFLAGS = %s\n" , cflags ) ;
# 666 "main.c"
printf ( " CLINKFLAGS = %s\n" , clinkflags ) ;
# 672 "main.c"
printf ( "\n--------------------------------------\n" ) ; printf ( " Please send all errors/feedbacks to:\n" ) ; printf ( " Center for Manycore Programming\n" ) ; printf ( " cmp@aces.snu.ac.kr\n" ) ; printf ( " http://aces.snu.ac.kr\n" ) ; printf ( "--------------------------------------\n" ) ; }
# 680 "main.c"
# 1 "../../common/wtime.h"
# 3 "../../common/wtime.h"
# 681 "main.c"
# 681 "main.c"
/* Second inclusion of the PGI managed-memory stdlib declarations. */
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * malloc_managed ( size_t ) ; extern void * calloc_managed ( size_t , size_t ) ; extern void free_managed ( void * ) ; extern void cfree_managed ( void * ) ; # 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * realloc_managed ( void * , size_t ) ; extern void * valloc_managed ( size_t ) ; extern void * pvalloc_managed ( size_t ) ; extern void * memalign_managed ( size_t , size_t ) ; extern int posix_memalign_managed ( void * * , size_t , size_t ) ; extern char * tmpnam_managed ( char * ) ; # 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 682 "main.c" # 683
"main.c" void wtime_ ( double * ) ; # 687 "main.c" static double elapsed_time ( void ) { double t ; # 694 "main.c" wtime_ ( & t ) ; return ( t ) ; } # 699 "main.c" static double start [ 64 ] , elapsed [ 64 ] ; # 701 "main.c" void timer_clear ( int n ) { elapsed [ n ] = 0.0 ; } # 710 "main.c" void timer_start ( int n ) { start [ n ] = elapsed_time ( ) ; } # 719 "main.c" void timer_stop ( int n ) { double t , now ; # 726 "main.c" now = elapsed_time ( ) ; t = now - start [ n ] ; elapsed [ n ] += t ; # 730 "main.c" } # 733 "main.c" double timer_read ( int n ) { return ( elapsed [ n ] ) ; } # 741 "main.c" # 1 "/usr/include/stdio.h" <System_Header> # 17 "/usr/include/stdio.h" <System_Header> # 21 "/usr/include/stdio.h" <System_Header> # 742 "main.c" # 742 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 29 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 35 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double tgamma ( double ) ; float tgammaf ( float ) ; # 38 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double round ( double ) ; float roundf ( float ) ; long int lround ( double ) ; long int lroundf ( float ) ; long long int llround ( double ) ; long long int llroundf ( float ) ; # 59 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 1 "/usr/include/math.h" <System_Header> # 17 "/usr/include/math.h" <System_Header> # 21 "/usr/include/math.h" <System_Header> # 26 "/usr/include/math.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 27 "/usr/include/math.h" <System_Header> # 30 "/usr/include/math.h" <System_Header> # 31 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header> # 24 
"/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header> # 25 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header> # 31 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header> # 32 "/usr/include/math.h" <System_Header> # 34 "/usr/include/math.h" <System_Header> # 35 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header> # 18 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header> # 24 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header> # 39 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header> # 1 "/usr/include/endian.h" <System_Header> # 16 "/usr/include/endian.h" <System_Header> # 40 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header> # 41 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header> typedef union { unsigned char __c [ 8 ] ; double __d ; } __huge_val_t ; # 50 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header> static __huge_val_t __huge_val = { { 0 , 0 , 0 , 0 , 0 , 0 , 0xf0 , 0x7f } } ; # 36 "/usr/include/math.h" <System_Header> # 37 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header> # 18 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header> # 24 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header> # 39 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header> typedef union { unsigned char __c [ 4 ] ; float __f ; } __huge_valf_t ; # 48 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header> static __huge_valf_t __huge_valf = { { 0 , 0 , 0x80 , 0x7f } } ; # 38 "/usr/include/math.h" <System_Header> # 38 "/usr/include/math.h" <System_Header> # 1 
"/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header> # 18 "/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header> # 37 "/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header> static union { unsigned char __c [ 12 ] ; long double __ld ; } __huge_vall = { { 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0x80 , 0xff , 0x7f , 0 , 0 } } ; # 39 "/usr/include/math.h" <System_Header> # 40 "/usr/include/math.h" <System_Header> # 41 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header> # 23 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header> # 42 "/usr/include/math.h" <System_Header> # 43 "/usr/include/math.h" <System_Header> # 44 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header> # 24 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header> # 39 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header> # 1 "/usr/include/endian.h" <System_Header> # 16 "/usr/include/endian.h" <System_Header> # 40 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header> # 48 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header> static union { unsigned char __c [ 4 ] ; float __d ; } __qnan_union __attribute__ ( ( __unused__ ) ) = { { 0 , 0 , 0xc0 , 0x7f } } ; # 45 "/usr/include/math.h" <System_Header> # 47 "/usr/include/math.h" <System_Header> # 48 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header> typedef float float_t ; typedef double double_t ; # 41 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header> # 46 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header> # 49 "/usr/include/math.h" <System_Header> # 53 "/usr/include/math.h" 
<System_Header> # 83 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double acos ( double __x ) ; extern double __acos ( double __x ) ; extern double asin ( double __x ) ; extern double __asin ( double __x ) ; extern double atan ( double __x ) ; extern double __atan ( double __x ) ; extern double atan2 ( double __y , double __x ) ; extern double __atan2 ( double __y , double __x ) ; # 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double cos ( double __x ) ; extern double __cos ( double __x ) ; extern double sin ( double __x ) ; extern double __sin ( double __x ) ; extern double tan ( double __x ) ; extern double __tan ( double __x ) ; # 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double cosh ( double __x ) ; extern double __cosh ( double __x ) ; extern double sinh ( double __x ) ; extern double __sinh ( double __x ) ; extern double tanh ( double __x ) ; extern double __tanh ( double __x ) ; # 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double acosh ( double __x ) ; extern double __acosh ( double __x ) ; extern double asinh ( double __x ) ; extern double __asinh ( double __x ) ; extern double atanh ( double __x ) ; extern double __atanh ( double __x ) ; # 96 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double exp ( double __x ) ; extern double __exp ( double __x ) ; # 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double frexp ( double __x , int 
* __exponent ) ; extern double __frexp ( double __x , int * __exponent ) ; # 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double ldexp ( double __x , int __exponent ) ; extern double __ldexp ( double __x , int __exponent ) ; # 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double log ( double __x ) ; extern double __log ( double __x ) ; # 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double log10 ( double __x ) ; extern double __log10 ( double __x ) ; # 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double modf ( double __x , double * __iptr ) ; extern double __modf ( double __x , double * __iptr ) ; # 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double expm1 ( double __x ) ; extern double __expm1 ( double __x ) ; # 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double log1p ( double __x ) ; extern double __log1p ( double __x ) ; # 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double logb ( double __x ) ; extern double __logb ( double __x ) ; # 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double exp2 ( double __x ) ; extern double __exp2 ( double __x ) ; # 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double log2 ( double __x ) ; extern double __log2 ( double __x ) ; # 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double pow ( double __x , double __y ) ; extern double __pow ( double __x , double __y ) ; # 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double sqrt ( double __x ) ; extern double __sqrt ( double __x ) ; # 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double hypot ( double __x , double __y ) ; extern double __hypot ( double __x , 
double __y ) ; # 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double cbrt ( double __x ) ; extern double __cbrt ( double __x ) ; # 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double ceil ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __ceil ( double __x ) __attribute__ ( ( __const__ ) ) ; # 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double fabs ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __fabs ( double __x ) __attribute__ ( ( __const__ ) ) ; # 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double floor ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __floor ( double __x ) __attribute__ ( ( __const__ ) ) ; # 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double fmod ( double __x , double __y ) ; extern double __fmod ( double __x , double __y ) ; # 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __isinf ( double __value ) __attribute__ ( ( __const__ ) ) ; # 194 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __finite ( double __value ) __attribute__ ( ( __const__ ) ) ; # 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int isinf ( double __value ) __attribute__ ( ( __const__ ) ) ; # 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int finite ( double __value ) __attribute__ ( ( __const__ ) ) ; # 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double drem ( double __x , double __y ) ; extern double __drem ( double __x , double __y ) ; # 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double significand ( double __x ) ; extern double __significand ( double __x ) ; # 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 
<System_Header> extern double copysign ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __copysign ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; # 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double nan ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern double __nan ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; # 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __isnan ( double __value ) __attribute__ ( ( __const__ ) ) ; # 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int isnan ( double __value ) __attribute__ ( ( __const__ ) ) ; # 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double j0 ( double ) ; extern double __j0 ( double ) ; extern double j1 ( double ) ; extern double __j1 ( double ) ; extern double jn ( int , double ) ; extern double __jn ( int , double ) ; extern double y0 ( double ) ; extern double __y0 ( double ) ; extern double y1 ( double ) ; extern double __y1 ( double ) ; extern double yn ( int , double ) ; extern double __yn ( int , double ) ; # 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double erf ( double ) ; extern double __erf ( double ) ; extern double erfc ( double ) ; extern double __erfc ( double ) ; extern double lgamma ( double ) ; extern double __lgamma ( double ) ; # 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double tgamma ( double ) ; extern double __tgamma ( double ) ; # 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double gamma ( double ) ; extern double __gamma ( double ) ; # 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double lgamma_r ( double , int * __signgamp ) ; extern double __lgamma_r ( double , int * __signgamp ) ; # 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double rint ( double 
__x ) ; extern double __rint ( double __x ) ; # 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double nextafter ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __nextafter ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; # 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double nexttoward ( double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern double __nexttoward ( double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; # 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double remainder ( double __x , double __y ) ; extern double __remainder ( double __x , double __y ) ; # 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double scalbn ( double __x , int __n ) ; extern double __scalbn ( double __x , int __n ) ; # 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int ilogb ( double __x ) ; extern int __ilogb ( double __x ) ; # 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double scalbln ( double __x , long int __n ) ; extern double __scalbln ( double __x , long int __n ) ; # 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double nearbyint ( double __x ) ; extern double __nearbyint ( double __x ) ; # 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double round ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __round ( double __x ) __attribute__ ( ( __const__ ) ) ; # 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double trunc ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __trunc ( double __x ) __attribute__ ( ( __const__ ) ) ; # 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double remquo ( double __x , double __y , int * __quo ) ; extern double __remquo ( double __x , double __y , int * __quo ) ; 
# 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long int lrint ( double __x ) ; extern long int __lrint ( double __x ) ; # 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long long int llrint ( double __x ) ; extern long long int __llrint ( double __x ) ; # 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long int lround ( double __x ) ; extern long int __lround ( double __x ) ; # 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long long int llround ( double __x ) ; extern long long int __llround ( double __x ) ; # 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double fdim ( double __x , double __y ) ; extern double __fdim ( double __x , double __y ) ; # 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double fmax ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __fmax ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; # 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double fmin ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __fmin ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; # 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __fpclassify ( double __value ) __attribute__ ( ( __const__ ) ) ; # 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __signbit ( double __value ) __attribute__ ( ( __const__ ) ) ; # 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double fma ( double __x , double __y , double __z ) ; extern double __fma ( double __x , double __y , double __z ) ; # 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern double scalb ( double __x , double __n ) ; extern double __scalb ( double __x , double 
__n ) ; # 84 "/usr/include/math.h" <System_Header> # 94 "/usr/include/math.h" <System_Header> # 104 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float acosf ( float __x ) ; extern float __acosf ( float __x ) ; extern float asinf ( float __x ) ; extern float __asinf ( float __x ) ; extern float atanf ( float __x ) ; extern float __atanf ( float __x ) ; extern float atan2f ( float __y , float __x ) ; extern float __atan2f ( float __y , float __x ) ; # 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float cosf ( float __x ) ; extern float __cosf ( float __x ) ; extern float sinf ( float __x ) ; extern float __sinf ( float __x ) ; extern float tanf ( float __x ) ; extern float __tanf ( float __x ) ; # 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float coshf ( float __x ) ; extern float __coshf ( float __x ) ; extern float sinhf ( float __x ) ; extern float __sinhf ( float __x ) ; extern float tanhf ( float __x ) ; extern float __tanhf ( float __x ) ; # 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float acoshf ( float __x ) ; extern float __acoshf ( float __x ) ; extern float asinhf ( float __x ) ; extern float __asinhf ( float __x ) ; extern float atanhf ( float __x ) ; extern float __atanhf ( float __x ) ; # 96 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float expf ( float __x ) ; extern float __expf ( float __x ) ; # 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 
<System_Header> extern float frexpf ( float __x , int * __exponent ) ; extern float __frexpf ( float __x , int * __exponent ) ; # 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float ldexpf ( float __x , int __exponent ) ; extern float __ldexpf ( float __x , int __exponent ) ; # 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float logf ( float __x ) ; extern float __logf ( float __x ) ; # 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float log10f ( float __x ) ; extern float __log10f ( float __x ) ; # 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float modff ( float __x , float * __iptr ) ; extern float __modff ( float __x , float * __iptr ) ; # 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float expm1f ( float __x ) ; extern float __expm1f ( float __x ) ; # 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float log1pf ( float __x ) ; extern float __log1pf ( float __x ) ; # 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float logbf ( float __x ) ; extern float __logbf ( float __x ) ; # 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float exp2f ( float __x ) ; extern float __exp2f ( float __x ) ; # 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float log2f ( float __x ) ; extern float __log2f ( float __x ) ; # 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float powf ( float __x , float __y ) ; extern float __powf ( float __x , float __y ) ; # 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float sqrtf ( float __x ) ; extern float __sqrtf ( float __x ) ; # 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float hypotf ( float __x , float __y ) ; extern float 
__hypotf ( float __x , float __y ) ; # 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float cbrtf ( float __x ) ; extern float __cbrtf ( float __x ) ; # 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float ceilf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __ceilf ( float __x ) __attribute__ ( ( __const__ ) ) ; # 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float fabsf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __fabsf ( float __x ) __attribute__ ( ( __const__ ) ) ; # 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float floorf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __floorf ( float __x ) __attribute__ ( ( __const__ ) ) ; # 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float fmodf ( float __x , float __y ) ; extern float __fmodf ( float __x , float __y ) ; # 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __isinff ( float __value ) __attribute__ ( ( __const__ ) ) ; # 194 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __finitef ( float __value ) __attribute__ ( ( __const__ ) ) ; # 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int isinff ( float __value ) __attribute__ ( ( __const__ ) ) ; # 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int finitef ( float __value ) __attribute__ ( ( __const__ ) ) ; # 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float dremf ( float __x , float __y ) ; extern float __dremf ( float __x , float __y ) ; # 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float significandf ( float __x ) ; extern float __significandf ( float __x ) ; # 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 
<System_Header> extern float copysignf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __copysignf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; # 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float nanf ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern float __nanf ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; # 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __isnanf ( float __value ) __attribute__ ( ( __const__ ) ) ; # 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int isnanf ( float __value ) __attribute__ ( ( __const__ ) ) ; # 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float j0f ( float ) ; extern float __j0f ( float ) ; extern float j1f ( float ) ; extern float __j1f ( float ) ; extern float jnf ( int , float ) ; extern float __jnf ( int , float ) ; extern float y0f ( float ) ; extern float __y0f ( float ) ; extern float y1f ( float ) ; extern float __y1f ( float ) ; extern float ynf ( int , float ) ; extern float __ynf ( int , float ) ; # 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float erff ( float ) ; extern float __erff ( float ) ; extern float erfcf ( float ) ; extern float __erfcf ( float ) ; extern float lgammaf ( float ) ; extern float __lgammaf ( float ) ; # 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float tgammaf ( float ) ; extern float __tgammaf ( float ) ; # 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float gammaf ( float ) ; extern float __gammaf ( float ) ; # 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float lgammaf_r ( float , int * __signgamp ) ; extern float __lgammaf_r ( float , int * __signgamp ) ; # 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float rintf ( float __x ) ; extern float __rintf ( 
float __x ) ; # 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float nextafterf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __nextafterf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; # 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float nexttowardf ( float __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern float __nexttowardf ( float __x , long double __y ) __attribute__ ( ( __const__ ) ) ; # 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float remainderf ( float __x , float __y ) ; extern float __remainderf ( float __x , float __y ) ; # 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float scalbnf ( float __x , int __n ) ; extern float __scalbnf ( float __x , int __n ) ; # 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int ilogbf ( float __x ) ; extern int __ilogbf ( float __x ) ; # 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float scalblnf ( float __x , long int __n ) ; extern float __scalblnf ( float __x , long int __n ) ; # 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float nearbyintf ( float __x ) ; extern float __nearbyintf ( float __x ) ; # 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float roundf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __roundf ( float __x ) __attribute__ ( ( __const__ ) ) ; # 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float truncf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __truncf ( float __x ) __attribute__ ( ( __const__ ) ) ; # 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float remquof ( float __x , float __y , int * __quo ) ; extern float __remquof ( float __x , float __y , int * __quo ) ; # 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" 
<System_Header> # 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long int lrintf ( float __x ) ; extern long int __lrintf ( float __x ) ; # 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long long int llrintf ( float __x ) ; extern long long int __llrintf ( float __x ) ; # 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long int lroundf ( float __x ) ; extern long int __lroundf ( float __x ) ; # 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long long int llroundf ( float __x ) ; extern long long int __llroundf ( float __x ) ; # 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float fdimf ( float __x , float __y ) ; extern float __fdimf ( float __x , float __y ) ; # 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float fmaxf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __fmaxf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; # 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float fminf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __fminf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; # 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __fpclassifyf ( float __value ) __attribute__ ( ( __const__ ) ) ; # 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __signbitf ( float __value ) __attribute__ ( ( __const__ ) ) ; # 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float fmaf ( float __x , float __y , float __z ) ; extern float __fmaf ( float __x , float __y , float __z ) ; # 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern float scalbf ( float __x , float __n ) ; extern float __scalbf ( float __x , float __n ) ; # 105 "/usr/include/math.h" <System_Header> # 140 "/usr/include/math.h" 
<System_Header> # 151 "/usr/include/math.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double acosl ( long double __x ) ; extern long double __acosl ( long double __x ) ; extern long double asinl ( long double __x ) ; extern long double __asinl ( long double __x ) ; extern long double atanl ( long double __x ) ; extern long double __atanl ( long double __x ) ; extern long double atan2l ( long double __y , long double __x ) ; extern long double __atan2l ( long double __y , long double __x ) ; # 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double cosl ( long double __x ) ; extern long double __cosl ( long double __x ) ; extern long double sinl ( long double __x ) ; extern long double __sinl ( long double __x ) ; extern long double tanl ( long double __x ) ; extern long double __tanl ( long double __x ) ; # 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double coshl ( long double __x ) ; extern long double __coshl ( long double __x ) ; extern long double sinhl ( long double __x ) ; extern long double __sinhl ( long double __x ) ; extern long double tanhl ( long double __x ) ; extern long double __tanhl ( long double __x ) ; # 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double acoshl ( long double __x ) ; extern long double __acoshl ( long double __x ) ; extern long double asinhl ( long double __x ) ; extern long double __asinhl ( long double __x ) ; extern long double atanhl ( long double __x ) ; extern long double __atanhl ( long double __x ) ; # 96 
"/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double expl ( long double __x ) ; extern long double __expl ( long double __x ) ; # 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double frexpl ( long double __x , int * __exponent ) ; extern long double __frexpl ( long double __x , int * __exponent ) ; # 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double ldexpl ( long double __x , int __exponent ) ; extern long double __ldexpl ( long double __x , int __exponent ) ; # 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double logl ( long double __x ) ; extern long double __logl ( long double __x ) ; # 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double log10l ( long double __x ) ; extern long double __log10l ( long double __x ) ; # 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double modfl ( long double __x , long double * __iptr ) ; extern long double __modfl ( long double __x , long double * __iptr ) ; # 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double expm1l ( long double __x ) ; extern long double __expm1l ( long double __x ) ; # 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double log1pl ( long double __x ) ; extern long double __log1pl ( long double __x ) ; # 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double logbl ( long double __x ) ; extern long double __logbl ( long double __x ) ; # 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double exp2l ( long double __x ) ; extern long double __exp2l ( long double __x ) ; # 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double log2l ( long double __x ) ; extern long double 
__log2l ( long double __x ) ; # 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double powl ( long double __x , long double __y ) ; extern long double __powl ( long double __x , long double __y ) ; # 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double sqrtl ( long double __x ) ; extern long double __sqrtl ( long double __x ) ; # 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double hypotl ( long double __x , long double __y ) ; extern long double __hypotl ( long double __x , long double __y ) ; # 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double cbrtl ( long double __x ) ; extern long double __cbrtl ( long double __x ) ; # 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double ceill ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __ceill ( long double __x ) __attribute__ ( ( __const__ ) ) ; # 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double fabsl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __fabsl ( long double __x ) __attribute__ ( ( __const__ ) ) ; # 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double floorl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __floorl ( long double __x ) __attribute__ ( ( __const__ ) ) ; # 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double fmodl ( long double __x , long double __y ) ; extern long double __fmodl ( long double __x , long double __y ) ; # 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __isinfl ( long double __value ) __attribute__ ( ( __const__ ) ) ; # 194 
"/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __finitel ( long double __value ) __attribute__ ( ( __const__ ) ) ; # 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int isinfl ( long double __value ) __attribute__ ( ( __const__ ) ) ; # 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int finitel ( long double __value ) __attribute__ ( ( __const__ ) ) ; # 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double dreml ( long double __x , long double __y ) ; extern long double __dreml ( long double __x , long double __y ) ; # 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double significandl ( long double __x ) ; extern long double __significandl ( long double __x ) ; # 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double copysignl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __copysignl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; # 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double nanl ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern long double __nanl ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; # 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __isnanl ( long double __value ) __attribute__ ( ( __const__ ) ) ; # 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int isnanl ( long double __value ) __attribute__ ( ( __const__ ) ) ; # 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double j0l ( long double ) ; extern long double __j0l ( long double ) ; extern long double j1l ( long double ) ; extern long double __j1l ( long double ) ; extern long double jnl ( int , long double ) ; extern long double __jnl ( int , long double ) ; extern long double y0l ( long 
double ) ; extern long double __y0l ( long double ) ; extern long double y1l ( long double ) ; extern long double __y1l ( long double ) ; extern long double ynl ( int , long double ) ; extern long double __ynl ( int , long double ) ; # 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double erfl ( long double ) ; extern long double __erfl ( long double ) ; extern long double erfcl ( long double ) ; extern long double __erfcl ( long double ) ; extern long double lgammal ( long double ) ; extern long double __lgammal ( long double ) ; # 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double tgammal ( long double ) ; extern long double __tgammal ( long double ) ; # 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double gammal ( long double ) ; extern long double __gammal ( long double ) ; # 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double lgammal_r ( long double , int * __signgamp ) ; extern long double __lgammal_r ( long double , int * __signgamp ) ; # 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double rintl ( long double __x ) ; extern long double __rintl ( long double __x ) ; # 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double nextafterl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __nextafterl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; # 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double nexttowardl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __nexttowardl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; # 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double remainderl ( long double __x , long double __y ) ; extern long double __remainderl ( 
long double __x , long double __y ) ; # 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double scalbnl ( long double __x , int __n ) ; extern long double __scalbnl ( long double __x , int __n ) ; # 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int ilogbl ( long double __x ) ; extern int __ilogbl ( long double __x ) ; # 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double scalblnl ( long double __x , long int __n ) ; extern long double __scalblnl ( long double __x , long int __n ) ; # 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double nearbyintl ( long double __x ) ; extern long double __nearbyintl ( long double __x ) ; # 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double roundl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __roundl ( long double __x ) __attribute__ ( ( __const__ ) ) ; # 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double truncl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __truncl ( long double __x ) __attribute__ ( ( __const__ ) ) ; # 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double remquol ( long double __x , long double __y , int * __quo ) ; extern long double __remquol ( long double __x , long double __y , int * __quo ) ; # 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> # 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long int lrintl ( long double __x ) ; extern long int __lrintl ( long double __x ) ; # 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long long int llrintl ( long double __x ) ; extern long long int __llrintl ( long double __x ) ; # 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long int lroundl ( long double __x ) ; extern 
long int __lroundl ( long double __x ) ; # 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long long int llroundl ( long double __x ) ; extern long long int __llroundl ( long double __x ) ; # 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double fdiml ( long double __x , long double __y ) ; extern long double __fdiml ( long double __x , long double __y ) ; # 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double fmaxl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __fmaxl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; # 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double fminl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __fminl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; # 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __fpclassifyl ( long double __value ) __attribute__ ( ( __const__ ) ) ; # 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern int __signbitl ( long double __value ) __attribute__ ( ( __const__ ) ) ; # 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double fmal ( long double __x , long double __y , long double __z ) ; extern long double __fmal ( long double __x , long double __y , long double __z ) ; # 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header> extern long double scalbl ( long double __x , long double __n ) ; extern long double __scalbl ( long double __x , long double __n ) ; # 152 "/usr/include/math.h" <System_Header> # 167 "/usr/include/math.h" <System_Header> extern int signgam ; # 172 "/usr/include/math.h" <System_Header> # 206 "/usr/include/math.h" <System_Header> # 208 "/usr/include/math.h" <System_Header> enum { FP_NAN = # 213 "/usr/include/math.h" 
<System_Header> 0 , FP_INFINITE = # 216 "/usr/include/math.h" <System_Header> 1 , FP_ZERO = # 219 "/usr/include/math.h" <System_Header> 2 , FP_SUBNORMAL = # 222 "/usr/include/math.h" <System_Header> 3 , FP_NORMAL = # 225 "/usr/include/math.h" <System_Header> 4 } ; # 230 "/usr/include/math.h" <System_Header> # 232 "/usr/include/math.h" <System_Header> # 248 "/usr/include/math.h" <System_Header> # 268 "/usr/include/math.h" <System_Header> # 282 "/usr/include/math.h" <System_Header> # 290 "/usr/include/math.h" <System_Header> # 304 "/usr/include/math.h" <System_Header> # 318 "/usr/include/math.h" <System_Header> # 324 "/usr/include/math.h" <System_Header> # 346 "/usr/include/math.h" <System_Header> typedef enum { _IEEE_ = - 1 , _SVID_ , _XOPEN_ , _POSIX_ , _ISOC_ } _LIB_VERSION_TYPE ; # 358 "/usr/include/math.h" <System_Header> extern _LIB_VERSION_TYPE _LIB_VERSION ; # 368 "/usr/include/math.h" <System_Header> # 372 "/usr/include/math.h" <System_Header> struct exception # 374 "/usr/include/math.h" <System_Header> { int type ; char * name ; double arg1 ; double arg2 ; double retval ; } ; # 385 "/usr/include/math.h" <System_Header> extern int matherr ( struct exception * __exc ) ; # 390 "/usr/include/math.h" <System_Header> # 398 "/usr/include/math.h" <System_Header> # 411 "/usr/include/math.h" <System_Header> # 430 "/usr/include/math.h" <System_Header> # 450 "/usr/include/math.h" <System_Header> # 470 "/usr/include/math.h" <System_Header> # 476 "/usr/include/math.h" <System_Header> # 482 "/usr/include/math.h" <System_Header> # 484 "/usr/include/math.h" <System_Header> # 492 "/usr/include/math.h" <System_Header> # 500 "/usr/include/math.h" <System_Header> # 508 "/usr/include/math.h" <System_Header> # 516 "/usr/include/math.h" <System_Header> # 524 "/usr/include/math.h" <System_Header> # 60 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 254 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 301 "/opt/pgi/linux86-64/17.10/include/math.h" 
<System_Header> # 310 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_acos ( double ) ; # 313 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_asin ( double ) ; # 316 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_atan2 ( double , double ) ; # 319 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_atan ( double ) ; # 322 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_tan ( double ) ; # 325 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_cos ( double ) ; # 328 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_sin ( double ) ; # 331 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_fabs ( double ) ; # 334 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_sqrt ( double ) ; # 337 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_log ( double ) ; # 340 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_log10 ( double ) ; # 343 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_exp ( double ) ; # 346 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_pow ( double , double ) ; # 350 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_fmin ( double , double ) ; # 353 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_fminf ( float , float ) ; # 356 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> double __builtin_fmax ( double , double ) ; # 359 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_fmaxf ( float , float ) ; # 362 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_acosf ( float ) ; # 365 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_asinf ( float ) ; # 368 
"/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_atan2f ( float , float ) ; # 371 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_atanf ( float ) ; # 374 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_tanf ( float ) ; # 377 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_cosf ( float ) ; # 380 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_sinf ( float ) ; # 383 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_fabsf ( float ) ; # 386 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_sqrtf ( float ) ; # 389 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_logf ( float ) ; # 392 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_log10f ( float ) ; # 395 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_expf ( float ) ; # 398 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> float __builtin_powf ( float , float ) ; # 406 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 418 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> #pragma libm ( acosf , acoshf , asinf , asinhf , atanhf , atan2f ) #pragma libm ( cbrtf , ceilf , copysignf , cosf , coshf ) #pragma libm ( erff , erfcf , expf , exp2f , exp10f , expm1f ) #pragma libm ( fabsf , floorf , fmaf , fminf , fmaxf ) #pragma libm ( ilogbf ) #pragma libm ( ldexpf , lgammaf , llrintf , llroundf , logbf , log1pf , logf , log2f , log10f , lrintf , lroundf ) #pragma libm ( nanf , nearbyintf , nextafterf ) #pragma libm ( powf ) #pragma libm ( remainderf , remquof , rintf , roundf , rsqrtf ) #pragma libm ( scalblnf , scalbnf , sinf , sinhf , sqrtf ) #pragma libm ( tanf , tanhf , tgammaf , truncf ) #pragma libm ( abs , acos , acosh , asin , asinh , atanh , atan2 ) #pragma libm ( cbrt , ceil , copysign , cos , cosh ) #pragma libm ( erf , erfc , 
exp , exp2 , exp10 , expm1 ) #pragma libm ( fabs , floor , fma , fmin , fmax ) #pragma libm ( ilogb , isinf , isfinite , isnan ) #pragma libm ( ldexp , lgamma , llrint , llround , logb , log1p , log , log2 , log10 , lrint , lround ) #pragma libm ( pow ) #pragma libm ( nan , nearbyint , nextafter ) #pragma libm ( remainder , remquo , rint , round , rsqrt ) #pragma libm ( scalbln , scalbn , sin , sinh , sqrt ) #pragma libm ( tan , tanh , tgamma , trunc ) # 743 "main.c" # 743 "main.c" # 1 "../../common/type.h" # 4 "../../common/type.h" typedef enum { false , true } logical ; typedef struct { double real ; double imag ; } dcomplex ; # 744 "main.c" # 746 "main.c" void print_results ( char * name , char class , int n1 , int n2 , int n3 , int niter , double t , double mops , char * optype , logical verified , char * npbversion , char * compiletime , char * cs1 , char * cs2 , char * cs3 , char * cs4 , char * cs5 , char * cs6 , char * cs7 ) { char size [ 16 ] ; int j ; # 754 "main.c" printf ( "\n\n %s Benchmark Completed.\n" , name ) ; printf ( " Class = %12c\n" , class ) ; # 757 "main.c" # 762 "main.c" if ( ( n2 == 0 ) && ( n3 == 0 ) ) { if ( ( name [ 0 ] == 'E' ) && ( name [ 1 ] == 'P' ) ) { sprintf ( size , "%15.0lf" , __builtin_pow ( 2.0 , n1 ) ) ; j = 14 ; if ( size [ j ] == '.' 
) { size [ j ] = ' ' ; j -- ; } size [ j + 1 ] = '\0' ; printf ( " Size = %15s\n" , size ) ; } else { printf ( " Size = %12d\n" , n1 ) ; } } else { printf ( " Size = %4dx%4dx%4d\n" , n1 , n2 , n3 ) ; } # 779 "main.c" printf ( " Iterations = %12d\n" , niter ) ; printf ( " Time in seconds = %12.2lf\n" , t ) ; printf ( " Mop/s total = %15.2lf\n" , mops ) ; printf ( " Operation type = %24s\n" , optype ) ; if ( verified ) printf ( " Verification = %12s\n" , "SUCCESSFUL" ) ; else printf ( " Verification = %12s\n" , "UNSUCCESSFUL" ) ; printf ( " Version = %12s\n" , npbversion ) ; printf ( " Compile date = %12s\n" , compiletime ) ; printf ( "\n Compile options:\n" " CC = %s\n" , cs1 ) ; printf ( " CLINK = %s\n" , cs2 ) ; printf ( " C_LIB = %s\n" , cs3 ) ; printf ( " C_INC = %s\n" , cs4 ) ; printf ( " CFLAGS = %s\n" , cs5 ) ; printf ( " CLINKFLAGS = %s\n" , cs6 ) ; printf ( " RAND = %s\n" , cs7 ) ; # 799 "main.c" printf ( "\n--------------------------------------\n" " Please send all errors/feedbacks to:\n" " Center for Manycore Programming\n" " cmp@aces.snu.ac.kr\n" " http://aces.snu.ac.kr\n" "--------------------------------------\n\n" ) ; } # 806 "main.c" # 1 "/usr/include/stdio.h" <System_Header> # 17 "/usr/include/stdio.h" <System_Header> # 21 "/usr/include/stdio.h" <System_Header> # 807 "main.c" # 807 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 808 "main.c" # 809 "main.c" double randlc ( double * x , double a ) { # 834 "main.c" # 841 "main.c" const double r23 = 1.1920928955078125e-07 ; const double r46 = r23 * r23 ; const double t23 = 8.388608e+06 ; const double t46 = t23 * t23 ; # 846 "main.c" double t1 , t2 , t3 , t4 , a1 , a2 , x1 , x2 , z ; double r ; # 849 "main.c" t1 = r23 * a ; a1 = ( int ) t1 ; a2 = a - t23 * a1 ; # 856 "main.c" t1 = r23 * ( * x ) ; x1 = ( int ) t1 ; x2 = * x - t23 * x1 ; t1 = a1 * x2 + 
a2 * x1 ; t2 = ( int ) ( r23 * t1 ) ; z = t1 - t23 * t2 ; t3 = t23 * z + a2 * x2 ; t4 = ( int ) ( r46 * t3 ) ; * x = t3 - t46 * t4 ; r = r46 * ( * x ) ; # 872 "main.c" return r ; } # 876 "main.c" void vranlc ( int n , double * x , double a , double y [ ] ) { # 901 "main.c" # 908 "main.c" const double r23 = 1.1920928955078125e-07 ; const double r46 = r23 * r23 ; const double t23 = 8.388608e+06 ; const double t46 = t23 * t23 ; # 913 "main.c" double t1 , t2 , t3 , t4 , a1 , a2 , x1 , x2 , z ; # 915 "main.c" int i ; # 917 "main.c" t1 = r23 * a ; a1 = ( int ) t1 ; a2 = a - t23 * a1 ; # 924 "main.c" for ( i = 0 ; i < n ; i ++ ) { t1 = r23 * ( * x ) ; x1 = ( int ) t1 ; x2 = * x - t23 * x1 ; t1 = a1 * x2 + a2 * x1 ; t2 = ( int ) ( r23 * t1 ) ; z = t1 - t23 * t2 ; t3 = t23 * z + a2 * x2 ; t4 = ( int ) ( r46 * t3 ) ; * x = t3 - t46 * t4 ; y [ i ] = r46 * ( * x ) ; } # 945 "main.c" return ; } # 948 "main.c" # 1 "../../common/wtime.h" # 3 "../../common/wtime.h" # 949 "main.c" # 949 "main.c" # 1 "/usr/include/time.h" <System_Header> # 16 "/usr/include/time.h" <System_Header> # 20 "/usr/include/time.h" <System_Header> # 27 "/usr/include/time.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 28 "/usr/include/time.h" <System_Header> # 34 "/usr/include/time.h" <System_Header> # 37 "/usr/include/time.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 47 
"/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header> # 38 "/usr/include/time.h" <System_Header> # 40 "/usr/include/time.h" <System_Header> # 41 "/usr/include/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 44 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 47 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 60 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 62 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 64 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 66 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 68 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 70 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> 
# 72 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 74 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 76 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 78 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 80 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 83 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 42 "/usr/include/time.h" <System_Header> # 43 "/usr/include/time.h" <System_Header> # 132 "/usr/include/time.h" <System_Header> struct tm { int tm_sec ; int tm_min ; int tm_hour ; int tm_mday ; int tm_mon ; int tm_year ; int tm_wday ; int tm_yday ; int tm_isdst ; # 146 "/usr/include/time.h" <System_Header> long int tm_gmtoff ; const char * tm_zone ; # 152 "/usr/include/time.h" <System_Header> } ; # 160 "/usr/include/time.h" <System_Header> struct itimerspec { struct timespec it_interval ; struct timespec it_value ; } ; # 167 "/usr/include/time.h" <System_Header> struct sigevent ; # 188 "/usr/include/time.h" <System_Header> extern clock_t clock ( void ) ; # 191 "/usr/include/time.h" <System_Header> extern time_t time ( time_t * __timer ) ; # 194 "/usr/include/time.h" <System_Header> extern double difftime ( time_t __time1 , time_t __time0 ) __attribute__ ( ( __const__ ) ) ; # 198 "/usr/include/time.h" <System_Header> extern time_t mktime ( struct tm * __tp ) ; # 204 "/usr/include/time.h" <System_Header> extern size_t strftime ( char * __restrict __s , size_t __maxsize , const char * __restrict __format , const struct tm * __restrict __tp ) ; # 220 "/usr/include/time.h" <System_Header> # 221 "/usr/include/time.h" <System_Header> # 1 "/usr/include/xlocale.h" <System_Header> # 18 "/usr/include/xlocale.h" <System_Header> # 222 "/usr/include/time.h" <System_Header> # 223 "/usr/include/time.h" <System_Header> extern size_t strftime_l ( char * __restrict __s , size_t __maxsize , const char * __restrict __format , const struct tm * __restrict __tp , __locale_t __loc ) ; # 238 
"/usr/include/time.h" <System_Header> extern struct tm * gmtime ( const time_t * __timer ) ; # 242 "/usr/include/time.h" <System_Header> extern struct tm * localtime ( const time_t * __timer ) ; # 248 "/usr/include/time.h" <System_Header> extern struct tm * gmtime_r ( const time_t * __restrict __timer , struct tm * __restrict __tp ) ; # 253 "/usr/include/time.h" <System_Header> extern struct tm * localtime_r ( const time_t * __restrict __timer , struct tm * __restrict __tp ) ; # 260 "/usr/include/time.h" <System_Header> extern char * asctime ( const struct tm * __tp ) ; # 263 "/usr/include/time.h" <System_Header> extern char * ctime ( const time_t * __timer ) ; # 268 "/usr/include/time.h" <System_Header> # 271 "/usr/include/time.h" <System_Header> extern char * asctime_r ( const struct tm * __restrict __tp , char * __restrict __buf ) ; # 275 "/usr/include/time.h" <System_Header> extern char * ctime_r ( const time_t * __restrict __timer , char * __restrict __buf ) ; # 281 "/usr/include/time.h" <System_Header> extern char * __tzname [ 2 ] ; extern int __daylight ; extern long int __timezone ; # 288 "/usr/include/time.h" <System_Header> extern char * tzname [ 2 ] ; # 292 "/usr/include/time.h" <System_Header> extern void tzset ( void ) ; # 297 "/usr/include/time.h" <System_Header> extern int daylight ; extern long int timezone ; # 303 "/usr/include/time.h" <System_Header> extern int stime ( const time_t * __when ) ; # 309 "/usr/include/time.h" <System_Header> # 316 "/usr/include/time.h" <System_Header> # 318 "/usr/include/time.h" <System_Header> extern time_t timegm ( struct tm * __tp ) ; # 321 "/usr/include/time.h" <System_Header> extern time_t timelocal ( struct tm * __tp ) ; # 324 "/usr/include/time.h" <System_Header> extern int dysize ( int __year ) __attribute__ ( ( __const__ ) ) ; # 333 "/usr/include/time.h" <System_Header> extern int nanosleep ( const struct timespec * __requested_time , struct timespec * __remaining ) ; # 338 "/usr/include/time.h" 
<System_Header> extern int clock_getres ( clockid_t __clock_id , struct timespec * __res ) ; # 341 "/usr/include/time.h" <System_Header> extern int clock_gettime ( clockid_t __clock_id , struct timespec * __tp ) ; # 344 "/usr/include/time.h" <System_Header> extern int clock_settime ( clockid_t __clock_id , const struct timespec * __tp ) ; # 352 "/usr/include/time.h" <System_Header> extern int clock_nanosleep ( clockid_t __clock_id , int __flags , const struct timespec * __req , struct timespec * __rem ) ; # 357 "/usr/include/time.h" <System_Header> extern int clock_getcpuclockid ( pid_t __pid , clockid_t * __clock_id ) ; # 362 "/usr/include/time.h" <System_Header> extern int timer_create ( clockid_t __clock_id , struct sigevent * __restrict __evp , timer_t * __restrict __timerid ) ; # 367 "/usr/include/time.h" <System_Header> extern int timer_delete ( timer_t __timerid ) ; # 370 "/usr/include/time.h" <System_Header> extern int timer_settime ( timer_t __timerid , int __flags , const struct itimerspec * __restrict __value , struct itimerspec * __restrict __ovalue ) ; # 375 "/usr/include/time.h" <System_Header> extern int timer_gettime ( timer_t __timerid , struct itimerspec * __value ) ; # 379 "/usr/include/time.h" <System_Header> extern int timer_getoverrun ( timer_t __timerid ) ; # 950 "main.c" # 951 "main.c" # 1 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 16 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 1 "/usr/include/features.h" <System_Header> # 16 "/usr/include/features.h" <System_Header> # 22 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 23 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header> # 24 "/usr/include/x86_64-linux-gnu/sys/time.h" 
<System_Header> # 25 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 1 "/usr/include/time.h" <System_Header> # 16 "/usr/include/time.h" <System_Header> # 20 "/usr/include/time.h" <System_Header> # 26 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 27 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header> # 28 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 29 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 1 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 17 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 19 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header> # 30 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 54 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> struct timezone { int tz_minuteswest ; int tz_dsttime ; } ; # 61 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> typedef struct timezone * __restrict __timezone_ptr_t ; # 70 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> extern int gettimeofday ( struct timeval * __restrict __tv , __timezone_ptr_t __tz ) ; # 76 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> extern int settimeofday ( const struct timeval * __tv , const struct timezone * __tz ) ; # 84 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> extern int adjtime ( const struct timeval * __delta , struct timeval * __olddelta ) ; # 90 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> enum __itimer_which { ITIMER_REAL = 0 , # 96 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> ITIMER_VIRTUAL = 1 , # 99 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> ITIMER_PROF = 2 # 103 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> } ; # 106 
"/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> struct itimerval { struct timeval it_interval ; struct timeval it_value ; } ; # 120 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> typedef int __itimer_which_t ; # 124 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> extern int getitimer ( __itimer_which_t __which , struct itimerval * __value ) ; # 130 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> extern int setitimer ( __itimer_which_t __which , const struct itimerval * __restrict __new , struct itimerval * __restrict __old ) ; # 137 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> extern int utimes ( const char * __file , const struct timeval __tvp [ 2 ] ) ; # 142 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> extern int lutimes ( const char * __file , const struct timeval __tvp [ 2 ] ) ; # 146 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> extern int futimes ( int __fd , const struct timeval __tvp [ 2 ] ) ; # 161 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header> # 952 "main.c" # 954 "main.c" void wtime_ ( double * t ) { static int sec = - 1 ; struct timeval tv ; gettimeofday ( & tv , ( void * ) 0 ) ; if ( sec < 0 ) sec = tv . tv_sec ; * t = ( tv . tv_sec - sec ) + 1.0e-6 * tv . 
tv_usec ; } # 963 "main.c" # 992 "main.c" # 997 "main.c" # 1023 "main.c" # 1027 "main.c" # 1 "/usr/include/stdio.h" <System_Header> # 17 "/usr/include/stdio.h" <System_Header> # 21 "/usr/include/stdio.h" <System_Header> # 1028 "main.c" # 1028 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header> # 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * malloc_managed ( size_t ) ; extern void * calloc_managed ( size_t , size_t ) ; extern void free_managed ( void * ) ; extern void cfree_managed ( void * ) ; # 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> extern void * realloc_managed ( void * , size_t ) ; extern void * valloc_managed ( size_t ) ; extern void * pvalloc_managed ( size_t ) ; extern void * memalign_managed ( size_t , size_t ) ; extern int posix_memalign_managed ( void * * , size_t , size_t ) ; extern char * tmpnam_managed ( char * ) ; # 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header> # 1029 "main.c" # 1029 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 14 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header> # 1030 "main.c" # 1031 "main.c" # 1 "../globals.h" # 29 "../globals.h" # 34 "../globals.h" # 1 "../npbparams.h" # 6 "../npbparams.h" # 35 "../globals.h" # 35 "../globals.h" # 1 "../../common/type.h" # 36 "../globals.h" # 37 "../globals.h" # 49 "../globals.h" # 56 "../globals.h" # 65 
"../globals.h" # 74 "../globals.h" # 83 "../globals.h" # 92 "../globals.h" # 101 "../globals.h" # 110 "../globals.h" # 1032 "main.c" # 1032 "main.c" # 1 "../../common/randdp.h" # 4 "../../common/randdp.h" double randlc ( double * x , double a ) ; void vranlc ( int n , double * x , double a , double y [ ] ) ; # 1033 "main.c" # 1033 "main.c" # 1 "../../common/timers.h" # 4 "../../common/timers.h" void timer_clear ( int n ) ; void timer_start ( int n ) ; void timer_stop ( int n ) ; double timer_read ( int n ) ; # 1034 "main.c" # 1034 "main.c" # 1 "../../common/print_results.h" # 4 "../../common/print_results.h" void print_results ( char * name , char class , int n1 , int n2 , int n3 , int niter , double t , double mops , char * optype , int verified , char * npbversion , char * compiletime , char * cs1 , char * cs2 , char * cs3 , char * cs4 , char * cs5 , char * cs6 , char * cs7 ) ; # 1035 "main.c" # 1035 "main.c" # 1 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> # 10 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header> # 1036 "main.c" # 1036 "main.c" unsigned int nz = ( 150000 * ( 15 + 1 ) * ( 15 + 1 ) ) ; unsigned int naz = ( 150000 * ( 15 + 1 ) ) ; unsigned int na = 150000 ; static int colidx [ ( 150000 * ( 15 + 1 ) * ( 15 + 1 ) ) ] ; static int rowstr [ 150000 + 1 ] ; static int iv [ 150000 ] ; static int arow [ 150000 ] ; static int acol [ ( 150000 * ( 15 + 1 ) ) ] ; # 1047 "main.c" static double aelt [ ( 150000 * ( 15 + 1 ) ) ] ; static double a [ ( 150000 * ( 15 + 1 ) * ( 15 + 1 ) ) ] ; static double x [ 150000 + 2 ] ; static double z [ 150000 + 2 ] ; static double p [ 150000 + 2 ] ; static double q [ 150000 + 2 ] ; static double r [ 150000 + 2 ] ; # 1056 "main.c" static int naa ; static int nzz ; static int firstrow ; static int lastrow ; static int firstcol ; static int lastcol ; # 1064 "main.c" static double amult ; static double tran ; # 1068 "main.c" static logical timeron ; # 1073 "main.c" static void conj_grad ( int colidx [ ] , 
int rowstr [ ] , double x [ ] , double z [ ] , double a [ ] , double p [ ] , double q [ ] , double r [ ] , double * rnorm ) ; static void makea ( int n , int nz , double a [ ] , int colidx [ ] , int rowstr [ ] , int firstrow , int lastrow , int firstcol , int lastcol , int arow [ ] , int acol [ ] [ 15 + 1 ] , double aelt [ ] [ 15 + 1 ] , int iv [ ] ) ; static void sparse ( double a [ ] , int colidx [ ] , int rowstr [ ] , int n , int nz , int nozer , int arow [ ] , int acol [ ] [ 15 + 1 ] , double aelt [ ] [ 15 + 1 ] , int firstrow , int lastrow , int nzloc [ ] , double rcond , double shift ) ; static void sprnvc ( int n , int nz , int nn1 , double v [ ] , int iv [ ] ) ; static int icnvrt ( double x , int ipwr2 ) ; static void vecset ( int n , double v [ ] , int iv [ ] , int * nzv , int i , double val ) ; static int conj_calls = 0 ; static int loop_iter = 0 ; # 1118 "main.c" int main ( int argc , char * argv [ ] ) { int i , j , k , it ; int end ; # 1123 "main.c" double zeta ; double rnorm ; double norm_temp1 , norm_temp2 ; # 1127 "main.c" double t , mflops , tmax ; char Class ; int verified ; double zeta_verify_value , epsilon , err ; # 1132 "main.c" char * t_names [ 3 ] ; acc_init ( acc_device_default ) ; # 1135 "main.c" for ( i = 0 ; i < 3 ; i ++ ) { timer_clear ( i ) ; } FILE * fp ; if ( ( fp = fopen ( "timer.flag" , "r" ) ) != ( ( void * ) 0 ) ) { timeron = true ; t_names [ 0 ] = "init" ; t_names [ 1 ] = "benchmk" ; t_names [ 2 ] = "conjgd" ; fclose ( fp ) ; } else { timeron = false ; } # 1150 "main.c" timer_start ( 0 ) ; # 1152 "main.c" firstrow = 0 ; lastrow = 150000 - 1 ; firstcol = 0 ; lastcol = 150000 - 1 ; # 1157 "main.c" if ( 150000 == 1400 && 15 == 7 && 75 == 15 && 110.0 == 10 ) { Class = 'S' ; zeta_verify_value = 8.5971775078648 ; } else if ( 150000 == 7000 && 15 == 8 && 75 == 15 && 110.0 == 12 ) { Class = 'W' ; zeta_verify_value = 10.362595087124 ; } else if ( 150000 == 14000 && 15 == 11 && 75 == 15 && 110.0 == 20 ) { Class = 'A' ; zeta_verify_value = 
17.130235054029 ; } else if ( 150000 == 75000 && 15 == 13 && 75 == 75 && 110.0 == 60 ) { Class = 'B' ; zeta_verify_value = 22.712745482631 ; } else if ( 150000 == 150000 && 15 == 15 && 75 == 75 && 110.0 == 110 ) { Class = 'C' ; zeta_verify_value = 28.973605592845 ; } else if ( 150000 == 1500000 && 15 == 21 && 75 == 100 && 110.0 == 500 ) { Class = 'D' ; zeta_verify_value = 52.514532105794 ; } else if ( 150000 == 9000000 && 15 == 26 && 75 == 100 && 110.0 == 1500 ) { Class = 'E' ; zeta_verify_value = 77.522164599383 ; } else { Class = 'U' ; } # 1182 "main.c" printf ( "\n\n NAS Parallel Benchmarks (NPB3.3-ACC-C) - CG Benchmark\n\n" ) ; printf ( " Size: %11d\n" , 150000 ) ; printf ( " Iterations: %5d\n" , 75 ) ; printf ( "\n" ) ; # 1187 "main.c" naa = 150000 ; nzz = ( 150000 * ( 15 + 1 ) * ( 15 + 1 ) ) ; # 1190 "main.c" tran = 314159265.0 ; amult = 1220703125.0 ; zeta = randlc ( & tran , amult ) ; # 1197 "main.c" makea ( naa , nzz , a , colidx , rowstr , firstrow , lastrow , firstcol , lastcol , arow , ( int ( * ) [ 15 + 1 ] ) ( void * ) acol , ( double ( * ) [ 15 + 1 ] ) ( void * ) aelt , iv ) ; # 1207 "main.c" for ( j = 0 ; j < lastrow - firstrow + 1 ; j ++ ) { for ( k = rowstr [ j ] ; k < rowstr [ j + 1 ] ; k ++ ) { colidx [ k ] = colidx [ k ] - firstcol ; } } # 1222 "main.c" #pragma acc data copyin ( colidx [ 0 : nz ] , a [ 0 : nz ] , rowstr [ 0 : na + 1 ] ) create ( x [ 0 : na + 2 ] , z [ 0 : na + 2 ] , p [ 0 : na + 2 ] , q [ 0 : na + 2 ] , r [ 0 : na + 2 ] ) # 1227 "main.c" { int na_gangs = 150000 + 1 ; # 1232 "main.c" #pragma acc kernels loop gang ( ( na_gangs + 127 ) / 128 ) vector ( 128 ) for ( i = 0 ; i < 150000 + 1 ; i ++ ) { x [ i ] = 1.0 ; } # 1237 "main.c" end = lastcol - firstcol + 1 ; # 1238 "main.c" #pragma acc kernels loop gang ( ( end + 127 ) / 128 ) vector ( 128 ) for ( j = 0 ; j < end ; j ++ ) { q [ j ] = 0.0 ; z [ j ] = 0.0 ; r [ j ] = 0.0 ; p [ j ] = 0.0 ; } # 1246 "main.c" zeta = 0.0 ; # 1248 "main.c" for ( it = 1 ; it <= 1 ; it ++ ) { conj_grad 
( colidx , rowstr , x , z , a , p , q , r , & rnorm ) ; # 1259 "main.c" norm_temp1 = 0.0 ; norm_temp2 = 0.0 ; # 1267 "main.c" #pragma acc parallel loop num_gangs ( ( end + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) reduction ( + : norm_temp2 ) # 1269 "main.c" for ( j = 0 ; j < end ; j ++ ) { norm_temp2 = norm_temp2 + z [ j ] * z [ j ] ; } # 1274 "main.c" norm_temp2 = 1.0 / __builtin_sqrt ( norm_temp2 ) ; # 1276 "main.c" # 1279 "main.c" #pragma acc kernels loop gang ( ( end + 127 ) / 128 ) vector ( 128 ) for ( j = 0 ; j < end ; j ++ ) { x [ j ] = norm_temp2 * z [ j ] ; } } # 1286 "main.c" na_gangs = 150000 + 1 ; # 1290 "main.c" #pragma acc kernels loop gang ( ( na_gangs + 127 ) / 128 ) vector ( 128 ) for ( i = 0 ; i < 150000 + 1 ; i ++ ) { x [ i ] = 1.0 ; } # 1295 "main.c" zeta = 0.0 ; # 1297 "main.c" timer_stop ( 0 ) ; # 1299 "main.c" printf ( " Initialization time = %15.3f seconds\n" , timer_read ( 0 ) ) ; # 1301 "main.c" timer_start ( 1 ) ; # 1303 "main.c" for ( it = 1 ; it <= 75 ; it ++ ) { conj_grad ( colidx , rowstr , x , z , a , p , q , r , & rnorm ) ; # 1314 "main.c" norm_temp1 = 0.0 ; norm_temp2 = 0.0 ; # 1322 "main.c" #pragma acc parallel loop gang worker vector num_gangs ( ( end + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) reduction ( + : norm_temp1 , norm_temp2 ) # 1324 "main.c" for ( j = 0 ; j < end ; j ++ ) { norm_temp1 = norm_temp1 + x [ j ] * z [ j ] ; norm_temp2 = norm_temp2 + z [ j ] * z [ j ] ; } # 1329 "main.c" norm_temp2 = 1.0 / __builtin_sqrt ( norm_temp2 ) ; # 1331 "main.c" zeta = 110.0 + 1.0 / norm_temp1 ; if ( it == 1 ) printf ( "\n iteration ||r|| zeta\n" ) ; printf ( " %5d %20.14E%20.13f\n" , it , rnorm , zeta ) ; # 1336 "main.c" # 1339 "main.c" #pragma acc kernels loop gang ( ( end + 127 ) / 128 ) vector ( 128 ) for ( j = 0 ; j < end ; j ++ ) { x [ j ] = norm_temp2 * z [ j ] ; } } # 1345 "main.c" timer_stop ( 1 ) ; } # 1352 "main.c" t = timer_read ( 1 ) ; # 1354 "main.c" printf ( " Benchmark completed\n" ) ; # 1356 
"main.c" epsilon = 1.0e-10 ; if ( Class != 'U' ) { err = __builtin_fabs ( zeta - zeta_verify_value ) / zeta_verify_value ; if ( err <= epsilon ) { verified = true ; printf ( " VERIFICATION SUCCESSFUL\n" ) ; printf ( " Zeta is %20.13E\n" , zeta ) ; printf ( " Error is %20.13E\n" , err ) ; } else { verified = false ; printf ( " VERIFICATION FAILED\n" ) ; printf ( " Zeta %20.13E\n" , zeta ) ; printf ( " The correct zeta is %20.13E\n" , zeta_verify_value ) ; } } else { verified = false ; printf ( " Problem size unknown\n" ) ; printf ( " NO VERIFICATION PERFORMED\n" ) ; } # 1376 "main.c" if ( t != 0.0 ) { mflops = ( double ) ( 2 * 75 * 150000 ) * ( 3.0 + ( double ) ( 15 * ( 15 + 1 ) ) + 25.0 * ( 5.0 + ( double ) ( 15 * ( 15 + 1 ) ) ) + 3.0 ) / t / 1000000.0 ; } else { mflops = 0.0 ; } # 1385 "main.c" print_results ( "CG" , Class , 150000 , 0 , 0 , 75 , t , mflops , " floating point" , verified , "3.3.1" , "06 Dec 2017" , "icc" , "icc" , "-lm" , "-I../common" , "-O3 -mcmodel=medium" , "-O3 -mcmodel=medium" , "randdp" ) ; # 1391 "main.c" if ( timeron ) { tmax = timer_read ( 1 ) ; if ( tmax == 0.0 ) tmax = 1.0 ; printf ( " SECTION Time (secs)\n" ) ; for ( i = 0 ; i < 3 ; i ++ ) { t = timer_read ( i ) ; if ( i == 0 ) { printf ( " %8s:%9.3f\n" , t_names [ i ] , t ) ; } else { printf ( " %8s:%9.3f (%6.2f%%)\n" , t_names [ i ] , t , t * 100.0 / tmax ) ; if ( i == 2 ) { t = tmax - t ; printf ( " --> %8s:%9.3f (%6.2f%%)\n" , "rest" , t , t * 100.0 / tmax ) ; } } } } # 1412 "main.c" acc_shutdown ( acc_device_default ) ; printf ( "conj calls=%d, loop iter = %d. 
\n" , conj_calls , loop_iter ) ; # 1415 "main.c" return 0 ; } # 1419 "main.c" static void conj_grad ( int colidx [ ] , int rowstr [ ] , double x [ ] , double z [ ] , double a [ ] , double p [ ] , double q [ ] , double r [ ] , double * rnorm ) { int j , k , tmp1 , tmp2 , tmp3 ; int end ; int cgit , cgitmax = 25 ; double d , sum , rho , rho0 , alpha , beta ; double sum_array [ 150000 + 2 ] ; conj_calls ++ ; rho = 0.0 ; unsigned int num_gangs = 0 ; # 1441 "main.c" #pragma acc data present ( colidx [ 0 : nz ] , rowstr [ 0 : na + 1 ] , x [ 0 : na + 2 ] , z [ 0 : na + 2 ] , a [ 0 : nz ] , p [ 0 : na + 2 ] , q [ 0 : na + 2 ] , r [ 0 : na + 2 ] ) # 1446 "main.c" { # 1450 "main.c" #pragma acc kernels loop gang ( ( naa + 127 ) / 128 ) vector ( 128 ) independent for ( j = 0 ; j < naa ; j ++ ) { q [ j ] = 0.0 ; z [ j ] = 0.0 ; r [ j ] = x [ j ] ; p [ j ] = r [ j ] ; } # 1458 "main.c" # 1463 "main.c" #pragma acc parallel loop gang worker vector num_gangs ( ( lastcol - firstcol + 1 + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) reduction ( + : rho ) # 1465 "main.c" for ( j = 0 ; j < lastcol - firstcol + 1 ; j ++ ) { rho = rho + r [ j ] * r [ j ] ; } for ( cgit = 1 ; cgit <= cgitmax ; cgit ++ ) { # 1501 "main.c" loop_iter ++ ; end = lastrow - firstrow + 1 ; # 1506 "main.c" # 1506 "main.c" #pragma acc parallel num_gangs ( end ) num_workers ( 4 ) vector_length ( 32 ) { # 1508 "main.c" #pragma acc loop gang for ( j = 0 ; j < end ; j ++ ) { tmp1 = rowstr [ j ] ; tmp2 = rowstr [ j + 1 ] ; sum = 0.0 ; # 1513 "main.c" #pragma acc loop worker vector reduction ( + : sum ) for ( k = tmp1 ; k < tmp2 ; k ++ ) { tmp3 = colidx [ k ] ; sum = sum + a [ k ] * p [ tmp3 ] ; } q [ j ] = sum ; } } d = 0.0 ; end = lastcol - firstcol + 1 ; # 1526 "main.c" #pragma acc parallel num_gangs ( ( end + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) { # 1528 "main.c" #pragma acc loop gang worker vector reduction ( + : d ) for ( j = 0 ; j < end ; j ++ ) { d = d + p [ j ] * q [ j ] ; } } # 1534 
"main.c" alpha = rho / d ; # 1539 "main.c" rho0 = rho ; # 1544 "main.c" rho = 0.0 ; # 1549 "main.c" #pragma acc kernels loop gang ( ( end + 1023 ) / 1024 ) vector ( 1024 ) independent for ( j = 0 ; j < end ; j ++ ) { z [ j ] = z [ j ] + alpha * p [ j ] ; r [ j ] = r [ j ] - alpha * q [ j ] ; } # 1559 "main.c" #pragma acc parallel num_gangs ( ( end + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) { # 1561 "main.c" #pragma acc loop gang worker vector reduction ( + : rho ) for ( j = 0 ; j < end ; j ++ ) { rho = rho + r [ j ] * r [ j ] ; } } # 1568 "main.c" beta = rho / rho0 ; # 1573 "main.c" # 1576 "main.c" #pragma acc kernels loop gang ( ( end + 127 ) / 128 ) vector ( 128 ) independent for ( j = 0 ; j < end ; j ++ ) { p [ j ] = r [ j ] + beta * p [ j ] ; } } # 1582 "main.c" # 1588 "main.c" end = lastrow - firstrow + 1 ; # 1590 "main.c" #pragma acc parallel loop gang num_gangs ( end ) num_workers ( 4 ) vector_length ( 32 ) # 1592 "main.c" for ( j = 0 ; j < end ; j ++ ) { tmp1 = rowstr [ j ] ; tmp2 = rowstr [ j + 1 ] ; d = 0.0 ; # 1596 "main.c" #pragma acc loop worker vector reduction ( + : d ) for ( k = tmp1 ; k < tmp2 ; k ++ ) { tmp3 = colidx [ k ] ; d = d + a [ k ] * z [ tmp3 ] ; } r [ j ] = d ; } sum = 0.0 ; # 1609 "main.c" #pragma acc parallel loop gang worker vector num_gangs ( ( lastcol - firstcol + 1 + 127 ) / 128 ) num_workers ( 4 ) vector_length ( 32 ) reduction ( + : sum ) # 1613 "main.c" for ( j = 0 ; j < lastcol - firstcol + 1 ; j ++ ) { d = x [ j ] - r [ j ] ; sum = sum + d * d ; } # 1618 "main.c" } * rnorm = __builtin_sqrt ( sum ) ; } # 1623 "main.c" static void makea ( int n , int nz , double a [ ] , int colidx [ ] , int rowstr [ ] , int firstrow , int lastrow , int firstcol , int lastcol , int arow [ ] , int acol [ ] [ 15 + 1 ] , double aelt [ ] [ 15 + 1 ] , int iv [ ] ) { int iouter , ivelt , nzv , nn1 ; int ivc [ 15 + 1 ] ; double vc [ 15 + 1 ] ; # 1666 "main.c" # 1670 "main.c" nn1 = 1 ; do { nn1 = 2 * nn1 ; } while ( nn1 < n ) ; # 1678 
"main.c" for ( iouter = 0 ; iouter < n ; iouter ++ ) { nzv = 15 ; sprnvc ( n , nzv , nn1 , vc , ivc ) ; vecset ( n , vc , ivc , & nzv , iouter + 1 , 0.5 ) ; arow [ iouter ] = nzv ; for ( ivelt = 0 ; ivelt < nzv ; ivelt ++ ) { acol [ iouter ] [ ivelt ] = ivc [ ivelt ] - 1 ; aelt [ iouter ] [ ivelt ] = vc [ ivelt ] ; } } # 1693 "main.c" sparse ( a , colidx , rowstr , n , nz , 15 , arow , acol , aelt , firstrow , lastrow , iv , 1.0e-1 , 110.0 ) ; } # 1703 "main.c" static void sparse ( double a [ ] , int colidx [ ] , int rowstr [ ] , int n , int nz , int nozer , int arow [ ] , int acol [ ] [ 15 + 1 ] , double aelt [ ] [ 15 + 1 ] , int firstrow , int lastrow , int nzloc [ ] , double rcond , double shift ) { int nrows ; # 1724 "main.c" int i , j , j1 , j2 , nza , k , kk , nzrow , jcol ; double size , scale , ratio , va ; logical cont40 ; # 1732 "main.c" nrows = lastrow - firstrow + 1 ; # 1737 "main.c" for ( j = 0 ; j < nrows + 1 ; j ++ ) { rowstr [ j ] = 0 ; } # 1744 "main.c" for ( i = 0 ; i < n ; i ++ ) { for ( nza = 0 ; nza < arow [ i ] ; nza ++ ) { j = acol [ i ] [ nza ] + 1 ; rowstr [ j ] = rowstr [ j ] + arow [ i ] ; } } # 1751 "main.c" rowstr [ 0 ] = 0 ; for ( j = 1 ; j < nrows + 1 ; j ++ ) { rowstr [ j ] = rowstr [ j ] + rowstr [ j - 1 ] ; } nza = rowstr [ nrows ] - 1 ; # 1757 "main.c" if ( nza > nz ) { printf ( "Space for matrix elements exceeded in sparse\n" ) ; printf ( "nza, nzmax = %d, %d\n" , nza , nz ) ; exit ( 1 ) ; } # 1767 "main.c" for ( j = 0 ; j < nrows ; j ++ ) { for ( k = rowstr [ j ] ; k < rowstr [ j + 1 ] ; k ++ ) { a [ k ] = 0.0 ; colidx [ k ] = - 1 ; } nzloc [ j ] = 0 ; } # 1778 "main.c" size = 1.0 ; ratio = __builtin_pow ( rcond , ( 1.0 / ( double ) ( n ) ) ) ; # 1784 "main.c" for ( i = 0 ; i < n ; i ++ ) { for ( nza = 0 ; nza < arow [ i ] ; nza ++ ) { j = acol [ i ] [ nza ] ; # 1788 "main.c" scale = size * aelt [ i ] [ nza ] ; for ( nzrow = 0 ; nzrow < arow [ i ] ; nzrow ++ ) { jcol = acol [ i ] [ nzrow ] ; va = aelt [ i ] [ nzrow ] * scale ; # 
1793 "main.c"
                /* (continuation of sparse(): scatter va into row j of the CSR
                   matrix) On the diagonal, add rcond - shift to the element. */
                if (jcol == j && j == i) {
                    va = va + rcond - shift;
                }
# 1801 "main.c"
                /* Insert (jcol, va) into row j, keeping column indices sorted. */
                cont40 = false;
                for (k = rowstr[j]; k < rowstr[j + 1]; k++) {
                    if (colidx[k] > jcol) {
                        /* Shift larger entries one slot right to open a hole at k. */
                        for (kk = rowstr[j + 1] - 2; kk >= k; kk--) {
                            if (colidx[kk] > -1) {
                                a[kk + 1] = a[kk];
                                colidx[kk + 1] = colidx[kk];
                            }
                        }
                        colidx[k] = jcol;
                        a[k] = 0.0;
                        cont40 = true;
                        break;
                    } else if (colidx[k] == -1) {
                        /* First free slot in the row: claim it for jcol. */
                        colidx[k] = jcol;
                        cont40 = true;
                        break;
                    } else if (colidx[k] == jcol) {
                        /* Column already present: count one duplicate slot that
                           can be squeezed out during the compaction pass below. */
                        nzloc[j] = nzloc[j] + 1;
                        cont40 = true;
                        break;
                    }
                }
                if (cont40 == false) {
                    printf("internal error in sparse: i=%d\n", i);
                    exit(1);
                }
                a[k] = a[k] + va; /* accumulate into the (possibly new) slot */
            }
        }
        size = size * ratio; /* size = ratio^i, ratio = rcond^(1/n) (see above) */
    }
# 1840 "main.c"
    /* Prefix-sum the duplicate counts: nzloc[j] = duplicates in rows 0..j. */
    for (j = 1; j < nrows; j++) {
        nzloc[j] = nzloc[j] + nzloc[j - 1];
    }
# 1847 "main.c"
    /* Compact a[] / colidx[] in place, removing slots wasted on duplicates. */
    for (j = 0; j < nrows; j++) {
        if (j > 0) {
            j1 = rowstr[j] - nzloc[j - 1];
        } else {
            j1 = 0;
        }
        j2 = rowstr[j + 1] - nzloc[j];
        nza = rowstr[j];
        for (k = j1; k < j2; k++) {
            a[k] = a[nza];
            colidx[k] = colidx[nza];
            nza = nza + 1;
        }
    }
    /* Rebase the row pointers onto the compacted layout. */
    for (j = 1; j < nrows + 1; j++) {
        rowstr[j] = rowstr[j] - nzloc[j - 1];
    }
    nza = rowstr[nrows] - 1;
}
# 1868 "main.c"
/* sprnvc: generate a sparse vector with nz distinct random nonzeros.
 * v[] receives the values, iv[] the 1-based indices.
 * nn1 is the smallest power of two >= n (computed by the caller, makea);
 * it maps a random double in [0,1) onto a candidate index via icnvrt().
 * Candidates > n and duplicate indices are rejected and redrawn.
 * Consumes the global random stream (tran, amult) via randlc(). */
static void sprnvc(int n, int nz, int nn1, double v[], int iv[])
{
    int nzv, ii, i;
    double vecelt, vecloc;
# 1882 "main.c"
    nzv = 0;
# 1884 "main.c"
    while (nzv < nz) {
        vecelt = randlc(&tran, amult);
# 1887 "main.c"
        /* Draw a candidate index; retry if it falls outside 1..n. */
        vecloc = randlc(&tran, amult);
        i = icnvrt(vecloc, nn1) + 1;
        if (i > n) continue;
# 1894 "main.c"
        /* Reject duplicates so all nz indices are distinct. */
        logical was_gen = false;
        for (ii = 0; ii < nzv; ii++) {
            if (iv[ii] == i) {
                was_gen = true;
                break;
            }
        }
        if (was_gen) continue;
        v[nzv] = vecelt;
        iv[nzv] = i;
        nzv = nzv + 1;
    }
}
# 1912 "main.c"
/* icnvrt: scale x in [0,1) to an integer in [0, ipwr2) by truncation. */
static int icnvrt(double x, int ipwr2)
{
    return (int)(ipwr2 * x);
}
# 1921 "main.c"
/* vecset: set component i of sparse vector (v, iv, *nzv) to val,
 * appending a new nonzero if index i is not already present.
 * (Parameter n is unused; kept for interface symmetry.) */
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val)
{
    int k;
    logical set;
# 1930 "main.c"
    set = false;
    for (k = 0; k < *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            set = true;
        }
    }
    if (set == false) {
        v[*nzv] = val;
        iv[*nzv] = i;
        *nzv = *nzv + 1;
    }
}
DRB106-taskwaitmissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>

/* This is a program based on a test contributed by Yizi Gu@Rice Univ.
 * Classic Fibonacci calculation using task but missing taskwait.
 * Data races pairs: i@61:5 vs i@65:12
 *                   j@63:5 vs j@65:14
 * */

unsigned int input = 10; /* Fibonacci argument; expected output is 55 */

/* Task-parallel Fibonacci.
 * NOTE(review): the race below is INTENTIONAL -- this is a DataRaceBench
 * "-yes" (race-present) test case; do not "fix" it. */
int fib(unsigned int n) {
    if (n < 2)
        return n;
    else {
        int i, j;
#pragma omp task shared(i)
        i = fib(n - 1);
#pragma omp task shared(j)
        j = fib(n - 2);
        /* i and j are read here BEFORE the taskwait below, racing with the
         * child tasks' writes above. */
        int res = i + j;
        /* We move the original taskwait to a location after i+j to
         * simulate the missing taskwait mistake.
         * Directly removing the taskwait may cause a child task to write to i or j
         * within the stack of a parent task which may already be gone, causing seg fault.
         * This change is suggested by Joachim Protze @RWTH-Aachen.
         * */
#pragma omp taskwait
        return res;
    }
}

int main() {
    omprace_init();
    int result = 0;
#pragma omp parallel
    {
        /* A single thread spawns the root of the task tree. */
#pragma omp single
        {
            result = fib(input);
        }
    }
    printf ("Fib(%d)=%d (correct answer should be 55)\n", input, result);
    omprace_fini();
    return 0;
}
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
keystore_fmt_plug.c
/* Java KeyStore cracker. Written by Dhiru Kholia <dhiru at openwall.com> and * Narendra Kangralkar <narendrakangralkar at gmail.com>. * * Input Format: $keystore$target$data_length$data$hash$nkeys$keylength$keydata$keylength$keydata... * * This software is Copyright (c) 2013, Dhiru Kholia <dhiru.kholia at gmail.com> * and Narendra Kangralkar <narendrakangralkar at gmail.com> and it is hereby * released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * major re-write - JimF, Feb, 2016. * Added SIMD and prebuild all salt data for SIMD. * made a common code module (for sharing code with GPU) */ #if FMT_EXTERNS_H extern struct fmt_main fmt_keystore; #elif FMT_REGISTERS_H john_register_one(&fmt_keystore); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "simd-intrinsics.h" //#undef SIMD_COEF_32 #include "sha.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "dyna_salt.h" #include "johnswap.h" #include "keystore_common.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #if SIMD_COEF_32 #define OMP_SCALE 1024 #else #define OMP_SCALE 64 #endif #endif #elif SIMD_COEF_32 #define OMP_SCALE 128 #endif #include "memdbg.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1) #endif #define FORMAT_LABEL "keystore" #define FORMAT_NAME "Java KeyStore" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct keystore_salt *) #define SALT_ALIGN sizeof(struct keystore_salt *) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static char 
(*saved_key)[PLAINTEXT_LENGTH + 1]; static int (*saved_len); static SHA_CTX (*saved_ctx); static int dirty; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static int *MixOrder, MixOrderLen; #ifdef SIMD_COEF_32 #define GETPOS(i, index) ((index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32) static unsigned salt_mem_total; typedef struct preload_t { // Only handle password lengths of 4 to 24 (21 elements) in this code. // passwords of other lengths are handled by oSSL CTX method. uint32_t (*first_blk)[21][SHA_BUF_SIZ*NBKEYS]; uint32_t *ex_data[21]; int n_ex[21]; // number of sha blocks in ex_data. unsigned char data_hash[20]; // to find if this one loaded before. struct preload_t *next; } preload; static preload *salt_preload; // this is our linked list. static preload *cursimd; // set_salt points this to the current salt. #endif typedef struct keystore_salt_t { dyna_salt dsalt; int target; int data_length; int count; int keysize; unsigned char data_hash[20]; // this is the SHA of the data block. unsigned char *data; unsigned char *keydata; void *ptr; // points to a pre-built salt record (only SIMD) } keystore_salt; static keystore_salt *keystore_cur_salt; /* To guard against tampering with the keystore, we append a keyed * hash with a bit of whitener. */ inline static void getPreKeyedHash(int idx) { int i, j; unsigned char passwdBytes[PLAINTEXT_LENGTH * 2]; const char *magic = "Mighty Aphrodite"; char *password = saved_key[idx]; SHA_CTX *ctxp = &saved_ctx[idx]; for (i=0, j=0; i < strlen(password); i++) { // should this be proper LE UTF16 encoded??? NOPE. We now have // a utf-8 encoded test hash, and the below method works. // actually tried utf8_to_utf16_be, and the ascii passwords // work fine, but the utf8 hash FAILS. 
//passwdBytes[j++] = (password[i] >> 8); passwdBytes[j++] = 0; passwdBytes[j++] = password[i]; } SHA1_Init(ctxp); SHA1_Update(ctxp, passwdBytes, saved_len[idx] * 2); SHA1_Update(ctxp, magic, 16); } static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #elif SIMD_COEF_32 self->params.max_keys_per_crypt *= OMP_SCALE; #endif // we need 1 more saved_key than is 'used'. This extra key is used // in SIMD code, for all part full grouped blocks. saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt + 1); saved_len = mem_calloc(sizeof(*saved_len), self->params.max_keys_per_crypt + 1); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); saved_ctx = mem_calloc(sizeof(*saved_ctx), self->params.max_keys_per_crypt); MixOrderLen = self->params.max_keys_per_crypt*MAX_KEYS_PER_CRYPT+MAX_KEYS_PER_CRYPT; MixOrder = mem_calloc(MixOrderLen, sizeof(int)); } static void done(void) { MEM_FREE(MixOrder); MEM_FREE(saved_ctx); MEM_FREE(crypt_out); MEM_FREE(saved_len); MEM_FREE(saved_key); #ifdef SIMD_COEF_32 while (salt_preload) { int i; for (i = 20; i >= 0; --i) MEM_FREE(salt_preload->ex_data[i]); MEM_FREE(salt_preload->first_blk); salt_preload = salt_preload->next; } #endif } #ifdef SIMD_COEF_32 static void link_salt(keystore_salt *ps) { const unsigned char *magic = (const unsigned char*)"Mighty Aphrodite"; const unsigned char *cpm; unsigned char *cpo; int threads=1; int j,k,t,idx; preload *p = salt_preload; #ifdef _OPENMP threads = omp_get_max_threads(); #endif while (p) { if (!memcmp(p->data_hash, ps->data_hash, 20)) { ps->ptr = p; return; } p = p->next; } p = (preload *)mem_alloc_tiny(sizeof(preload), 16); memset(p, 0, sizeof(preload)); memcpy(p->data_hash, ps->data_hash, 20); // make sure this salt was not already loaded. IF it is loaded, then // adjust the pointer in the salt-db record. 
p->first_blk = mem_calloc_align(threads, sizeof(*p->first_blk), MEM_ALIGN_SIMD); salt_mem_total += threads*sizeof(*p->first_blk); for (t = 0; t < threads; ++t) { // t is threads for (j = 0; j < 21; ++j) { // j is length-4 of candidate password // actual length of this full string to SHA1. unsigned bits, len = (j+4)*2+16+ps->data_length; cpo = (unsigned char*)p->first_blk[t][j]; for (idx = 0; idx < NBKEYS; ++idx) { cpm = magic; for (k = (j+4)*2; *cpm; ++k) { cpo[GETPOS(k, idx)] = *cpm++; } cpm = ps->data; while (k < 64) { cpo[GETPOS(k, idx)] = *cpm++; ++k; } } if (t==0) { // we only add 1 instance of the ex_data. for each // password length, since this data is read only. // All threads can share it. p->ex_data[j] = mem_calloc_align((len+8)/64+1, 64*NBKEYS, MEM_ALIGN_SIMD); salt_mem_total += ((len+8)/64+1)*64*NBKEYS; for (idx = 0; idx < NBKEYS; ++idx) { int x, z=64-((j+4)*2+16), x_full=0; cpm = ps->data; cpm += z; cpo = (unsigned char*)p->ex_data[j]; for (x=0; x+z < ps->data_length; ++x) { cpo[GETPOS(x, idx)] = *cpm++; if (x == 63) { x -= 64; cpo += 64*NBKEYS; z += 64; x_full += 64; } } cpo[GETPOS(x, idx)] = 0x80; x += x_full; p->n_ex[j] = x/64+1; if (x%64 > 55) { ++p->n_ex[j]; cpo += 64*NBKEYS; } // now put bit length; bits = len<<3; x = 63; while (bits) { cpo[GETPOS(x, idx)] = bits&0xFF; bits >>= 8; --x; } } } } } // link this preload record into our list. p->next = salt_preload; salt_preload = p; // Adjust salt record. 
ps->ptr = p; } #endif static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; SHA_CTX ctx; static void *ptr; keystore_salt cs; memset(&cs, 0, sizeof(keystore_salt)); ctcopy += FORMAT_TAG_LEN; /* skip over "$keystore$" */ p = strtokm(ctcopy, "$"); cs.target = atoi(p); p = strtokm(NULL, "$"); cs.data_length = atoi(p); p = strtokm(NULL, "$"); cs.data = mem_alloc_tiny(cs.data_length, 1); for (i = 0; i < cs.data_length; i++) { cs.data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; } // used as a way to later compare salts. It is ALSO the // hash for a 0 byte password for this salt. SHA1_Init(&ctx); SHA1_Update(&ctx, "Mighty Aphrodite", 16); SHA1_Update(&ctx, cs.data, cs.data_length); SHA1_Final(cs.data_hash, &ctx); #ifdef SIMD_COEF_32 link_salt(&cs); #endif p = strtokm(NULL, "$"); /* skip hash */ p = strtokm(NULL, "$"); cs.count = atoi(p); p = strtokm(NULL, "$"); cs.keysize = atoi(p); cs.keydata = mem_alloc_tiny(cs.keysize, 1); for (i = 0; i < cs.keysize; i++) cs.keydata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); // setup the dyna_salt stuff. cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(keystore_salt, data_length); cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(keystore_salt, data_length, data, 0); cs.dsalt.salt_alloc_needs_free = 0; ptr = mem_alloc_tiny(sizeof(keystore_salt), MEM_ALIGN_WORD); memcpy(ptr, &cs, sizeof(keystore_salt)); return (void *) &ptr; } static void set_salt(void *salt) { keystore_cur_salt = *(keystore_salt **) salt; #ifdef SIMD_COEF_32 cursimd = (preload*)keystore_cur_salt->ptr; #endif } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index, tot_todo; #ifdef SIMD_COEF_32 // in SIMD code, we need to sort by password length. NOTE, 0-3 and +24 // byte passwords 'all' group into the final group. Those are run 1 at // a time through CTX based code. 
int j, tot=0; tot_todo = 0; saved_len[count] = 0; // point all 'tail' MMX buffer elements to this location. for (j = 0; j < 21 && tot<count; ++j) { for (index = 0; index < count; ++index) { if (saved_len[index] == j+4) { MixOrder[tot_todo++] = index; ++tot; } } while (tot_todo % MAX_KEYS_PER_CRYPT) MixOrder[tot_todo++] = count; } if (tot < count) { // these do not get SIMD usage. for (index = 0; index < count; ++index) { if (saved_len[index] < 4 || saved_len[index] > 24) { MixOrder[tot_todo] = index; ++tot; // we only want to do ONE password CTX mode // per loop through the thread. tot_todo += MAX_KEYS_PER_CRYPT; } } } #else // no need to mix. just run them one after the next, in any order. for (index = 0; index < count; ++index) MixOrder[index] = index; tot_todo = count; #endif index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < tot_todo; index += MAX_KEYS_PER_CRYPT) { SHA_CTX ctx; #ifdef SIMD_COEF_32 int x, tid=0, len, idx; char tmp_sse_out[20*MAX_KEYS_PER_CRYPT+MEM_ALIGN_SIMD]; uint32_t *sse_out; sse_out = (uint32_t *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD); #ifdef _OPENMP tid = omp_get_thread_num(); #endif len = saved_len[MixOrder[index]]; if (len >= 4 && len <= 24) { unsigned char *po; po = (unsigned char*)cursimd->first_blk[tid][len-4]; for (x = 0; x < MAX_KEYS_PER_CRYPT; ++x) { int j; unsigned char *p; idx = MixOrder[index+x]; p = (unsigned char*)saved_key[idx]; for (j = 0; j < len; ++j) po[GETPOS(j*2+1,x)] = p[j]; } SIMDSHA1body(po, sse_out, NULL, SSEi_MIXED_IN); po = (unsigned char*)cursimd->ex_data[len-4]; for (x = 0; x < cursimd->n_ex[len-4]; ++x) { SIMDSHA1body(po, sse_out, sse_out, SSEi_MIXED_IN|SSEi_RELOAD); po += 64*MAX_KEYS_PER_CRYPT; } #ifdef SIMD_COEF_32 // we have to 'marshal' the data back into the SIMD output buf. // but we only marshal the first 4 bytes. 
			/* Scatter the packed SIMD digests back into the per-candidate
			 * output array; only the first 32 bits are kept (cmp_exact
			 * recomputes the full hash when needed). */
			for (x = 0; x < MAX_KEYS_PER_CRYPT; ++x) {
				idx = MixOrder[index+x];
				if (idx < count)
					crypt_out[idx][0] = JOHNSWAP(sse_out[5*SIMD_COEF_32*(x/SIMD_COEF_32)+x%SIMD_COEF_32]);
			}
#endif
			// we do NOT want to fall through. We handled this
			// SIMD block of data already.
			continue;
		}
#endif
		/* Scalar (oSSL CTX) fallback path: rebuild the pre-keyed SHA-1
		 * context only when keys changed since the last crypt_all. */
		if (dirty)
			getPreKeyedHash(MixOrder[index]);
		if (saved_len[MixOrder[index]] == 0)
			/* empty password: data_hash is exactly the digest of
			 * "Mighty Aphrodite" + salt data, precomputed in get_salt */
			memcpy(crypt_out[MixOrder[index]], keystore_cur_salt->data_hash, 20);
		else {
			memcpy(&ctx, &saved_ctx[MixOrder[index]], sizeof(ctx));
			SHA1_Update(&ctx, keystore_cur_salt->data, keystore_cur_salt->data_length);
			SHA1_Final((unsigned char*)crypt_out[MixOrder[index]], &ctx);
		}
	}
	dirty = 0;
	return count;
}

/* Quick scan: does ANY computed hash match the first 32 bits of binary? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

/* Same 32-bit comparison for one specific candidate. */
static int cmp_one(void *binary, int index)
{
	if (((uint32_t*)binary)[0] == crypt_out[index][0])
		return 1;
	return 0;
}

/* Full-width verification of a candidate that passed cmp_one. */
static int cmp_exact(char *source, int index)
{
	unsigned char *binary = (unsigned char *)keystore_common_get_binary(source);
#ifdef SIMD_COEF_32
	// in SIMD, we only have the first 4 bytes copied into the binary buffer.
	// to for a cmp_one, so we do a full CTX type check
	SHA_CTX ctx;

	getPreKeyedHash(index);
	memcpy(&ctx, &saved_ctx[index], sizeof(ctx));
	SHA1_Update(&ctx, keystore_cur_salt->data, keystore_cur_salt->data_length);
	SHA1_Final((unsigned char*)crypt_out[index], &ctx);
#endif
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Store a candidate password; 'dirty' forces crypt_all to re-derive the
 * pre-keyed SHA-1 contexts. */
static void keystore_set_key(char *key, int index)
{
	saved_len[index] = strlen(key);
	strcpy(saved_key[index], key);
	dirty = 1;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* John the Ripper format descriptor: wires the functions above into the
 * generic cracking engine. */
struct fmt_main fmt_keystore = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
		/* FIXME: report keystore_cur_salt->data_length as tunable cost? */
		{ NULL },
		{ FORMAT_TAG },
		keystore_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		keystore_common_valid_cpu,
		fmt_default_split,
		keystore_common_get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		keystore_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
box3d1r.c
#define BENCH_DIM 3
#define BENCH_FPP 53
#define BENCH_RAD 1

#include "common.h"

/* 3-D dense box stencil of radius 1 (full 27-point neighborhood, constant
 * coefficients), double-buffered over A[0]/A[1] by timestep parity.
 *
 * A1       : flat buffer holding 2 * dimsize^3 elements (two time planes)
 * compsize : interior size per axis; dimsize adds BENCH_RAD halo on each side
 * timestep : number of sweeps
 * scop     : select the #pragma scop (polyhedral-tool) path vs. the OpenMP path
 *
 * Returns elapsed wall-clock seconds (end_time is never assigned in this
 * chunk, so the sb_time() fallback in the return expression is what is used).
 */
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
  double start_time = sb_time(), end_time = 0.0;
  int dimsize = compsize + BENCH_RAD * 2;
  /* View the flat buffer as A[2][dimsize][dimsize][dimsize] via a VLA cast. */
  SB_TYPE (*A)[dimsize][dimsize][dimsize]
    = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;

  if (scop) {
#pragma scop
    for (int t = 0; t < timestep; t++)
      for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
        for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
          for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
            /* 27 taps: 9 from plane i-1, 9 from plane i, 9 from plane i+1 */
            A[(t+1)%2][i][j][k] =
              0.0375f*A[t%2][i-1][j][k] +
              0.0371f*A[t%2][i-1][j-1][k-1] +
              0.0372f*A[t%2][i-1][j-1][k] +
              0.0373f*A[t%2][i-1][j-1][k+1] +
              0.0374f*A[t%2][i-1][j][k-1] +
              0.0376f*A[t%2][i-1][j][k+1] +
              0.0377f*A[t%2][i-1][j+1][k-1] +
              0.0378f*A[t%2][i-1][j+1][k] +
              0.0379f*A[t%2][i-1][j+1][k+1] +
              0.0355f*A[t%2][i][j][k] +
              0.0351f*A[t%2][i][j-1][k-1] +
              0.0352f*A[t%2][i][j-1][k] +
              0.0353f*A[t%2][i][j-1][k+1] +
              0.0354f*A[t%2][i][j][k-1] +
              0.0356f*A[t%2][i][j][k+1] +
              0.0357f*A[t%2][i][j+1][k-1] +
              0.0358f*A[t%2][i][j+1][k] +
              0.0359f*A[t%2][i][j+1][k+1] +
              0.0365f*A[t%2][i+1][j][k] +
              0.0361f*A[t%2][i+1][j-1][k-1] +
              0.0362f*A[t%2][i+1][j-1][k] +
              0.0363f*A[t%2][i+1][j-1][k+1] +
              0.0364f*A[t%2][i+1][j][k-1] +
              0.0366f*A[t%2][i+1][j][k+1] +
              0.0367f*A[t%2][i+1][j+1][k-1] +
              0.0368f*A[t%2][i+1][j+1][k] +
              0.0369f*A[t%2][i+1][j+1][k+1];
#pragma endscop
  }
  else {
    /* Reference path: same 27-tap update, parallelized over the i axis. */
    for (int t = 0; t < timestep; t++)
#pragma omp parallel for
      for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
        for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
          for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
            A[(t+1)%2][i][j][k] =
              0.0375f*A[t%2][i-1][j][k] +
              0.0371f*A[t%2][i-1][j-1][k-1] +
              0.0372f*A[t%2][i-1][j-1][k] +
              0.0373f*A[t%2][i-1][j-1][k+1] +
              0.0374f*A[t%2][i-1][j][k-1] +
              0.0376f*A[t%2][i-1][j][k+1] +
              0.0377f*A[t%2][i-1][j+1][k-1] +
              0.0378f*A[t%2][i-1][j+1][k] +
              0.0379f*A[t%2][i-1][j+1][k+1] +
              0.0355f*A[t%2][i][j][k] +
              0.0351f*A[t%2][i][j-1][k-1] +
              0.0352f*A[t%2][i][j-1][k] +
              0.0353f*A[t%2][i][j-1][k+1] +
              0.0354f*A[t%2][i][j][k-1] +
              0.0356f*A[t%2][i][j][k+1] +
              0.0357f*A[t%2][i][j+1][k-1] +
              0.0358f*A[t%2][i][j+1][k] +
              0.0359f*A[t%2][i][j+1][k+1] +
              0.0365f*A[t%2][i+1][j][k] +
              0.0361f*A[t%2][i+1][j-1][k-1] +
              0.0362f*A[t%2][i+1][j-1][k] +
              0.0363f*A[t%2][i+1][j-1][k+1] +
              0.0364f*A[t%2][i+1][j][k-1] +
              0.0366f*A[t%2][i+1][j][k+1] +
              0.0367f*A[t%2][i+1][j+1][k-1] +
              0.0368f*A[t%2][i+1][j+1][k] +
              0.0369f*A[t%2][i+1][j+1][k+1];
  }

  return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
GB_binop__times_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__times_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__times_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__times_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fp64) // A*D function (colscale): GB (_AxD__times_fp64) // D*A function (rowscale): GB (_DxB__times_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__times_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__times_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fp64) // C=scalar+B GB (_bind1st__times_fp64) // C=scalar+B' GB (_bind1st_tran__times_fp64) // C=A+scalar GB (_bind2nd__times_fp64) // C=A'+scalar GB (_bind2nd_tran__times_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x * y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_FP64 || GxB_NO_TIMES_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__times_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_fp64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; double alpha_scalar ; double beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((double *) alpha_scalar_in)) ; beta_scalar = (*((double *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_fp64) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB (_bind1st_tran__times_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB (_bind2nd_tran__times_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
window.c
/********************************************************************[libaroma]* * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *______________________________________________________________________________ * * Filename : window.c * Description : window * * + This is part of libaroma, an embedded ui toolkit. * + 06/04/15 - Author(s): Ahmad Amarullah * */ #ifndef __libaroma_window_c__ #define __libaroma_window_c__ #include <aroma_internal.h> #include "ui_internal.h" /* check wm macro */ #define __CHECK_WM(RETVAL) \ if (libaroma_wm()==NULL){ \ ALOGW("window manager uninitialized"); \ return RETVAL; \ } /* * Variable : _libaroma_window_measurement_dp * Type : byte * Descriptions: default measurement */ static byte _libaroma_window_measurement_dp=1; /* * Function : libaroma_window_usedp * Return Value: byte * Descriptions: use dp for measurement */ byte libaroma_window_usedp(byte isdp){ if (isdp==1){ _libaroma_window_measurement_dp=1; } else if (!isdp){ _libaroma_window_measurement_dp=0; } return _libaroma_window_measurement_dp; } /* End of libaroma_window_usedp */ /* * Function : libaroma_window_measure_point * Return Value: int * Descriptions: mesure point */ int libaroma_window_measure_point(int x){ if (_libaroma_window_measurement_dp){ return libaroma_dp(x); } return x; } /* End of libaroma_window_measure_point */ /* * Function : _libaroma_window_measure_save * Return Value: void * Descriptions: save measurement 
value */ void _libaroma_window_measure_save(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win!=NULL){ if (_libaroma_window_measurement_dp){ win->left = libaroma_px(win->x); win->top = libaroma_px(win->y); win->width= libaroma_px(win->w); win->height= libaroma_px(win->h); } else{ win->left = win->x; win->top = win->y; win->width= win->w; win->height= win->h; } } if (ctl!=NULL){ if (_libaroma_window_measurement_dp){ ctl->left = libaroma_px(ctl->x); ctl->top = libaroma_px(ctl->y); ctl->width= libaroma_px(ctl->w); ctl->height= libaroma_px(ctl->h); } else{ ctl->left = ctl->x; ctl->top = ctl->y; ctl->width= ctl->w; ctl->height= ctl->h; } } } /* End of _libaroma_window_measure_save */ /* * Function : libaroma_window_measure_calculate * Return Value: int * Descriptions: calculate measurement */ int libaroma_window_measure_calculate( int cv, int pos, int max, int is_size, int x){ if (is_size){ if (pos<=0){ switch (pos){ case LIBAROMA_POS_HALF: return (max / 2)-x; break; case LIBAROMA_POS_1P3: return (max / 3)-x; break; case LIBAROMA_POS_2P3: return (max * 2 / 3)-x; break; case LIBAROMA_POS_1P4: return (max / 4)-x; break; case LIBAROMA_POS_3P4: return (max * 3 / 4)-x; break; case LIBAROMA_SIZE_FULL: return max; break; case LIBAROMA_SIZE_HALF: return max / 2; break; case LIBAROMA_SIZE_THIRD: return max / 3; break; case LIBAROMA_SIZE_QUARTER: return max / 4; break; default: return abs(pos); } } } else{ if (pos<0){ switch (pos){ case LIBAROMA_POS_HALF: return max / 2; break; case LIBAROMA_POS_1P3: return max / 3; break; case LIBAROMA_POS_2P3: return max * 2 / 3; break; case LIBAROMA_POS_1P4: return max / 4; break; case LIBAROMA_POS_3P4: return max * 3 / 4; break; default: return abs(pos); } } } return cv; } /* End of libaroma_window_measure_calculate */ /* * Function : libaroma_window_measure_size * Return Value: byte * Descriptions: measure window size */ byte libaroma_window_measure_size(LIBAROMA_WINDOWP win){ if (win){ if (win->parent!=NULL){ ALOGW("window_resize cannot 
be used for child window"); return 0; } if (_libaroma_window_measurement_dp){ win->x = libaroma_dp(win->rx); win->y = libaroma_dp(win->ry); win->w = libaroma_dp(win->rw); win->h = libaroma_dp(win->rh); } else{ win->x = win->rx; win->y = win->ry; win->w = win->rw; win->h = win->rh; } win->ax=win->x; win->ay=win->y; win->x=libaroma_window_measure_calculate( win->x, win->rx, libaroma_wm()->w, 0, 0 ); win->y=libaroma_window_measure_calculate( win->y, win->ry, libaroma_wm()->h, 0, 0 ); win->w=libaroma_window_measure_calculate( win->w, win->rw, libaroma_wm()->w, 1, win->x ); win->h=libaroma_window_measure_calculate( win->h, win->rh, libaroma_wm()->h, 1, win->y ); if (win->w+win->x>libaroma_wm()->w){ win->w = libaroma_wm()->w-win->x; } if (win->h+win->y>libaroma_wm()->h){ win->h = libaroma_wm()->h-win->y; } _libaroma_window_measure_save(win,NULL); LIBAROMA_MSG _msg; libaroma_window_process_event(win,libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0) ); return 1; } return 0; } /* End of libaroma_window_measure */ /* * Function : _libaroma_window_ui_thread * Return Value: byte * Descriptions: window ui thread */ byte _libaroma_window_ui_thread(LIBAROMA_WINDOWP win) { int i; byte need_sync = 0; if (win->active==1){ #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ LIBAROMA_CONTROLP c=win->childs[i]; if (c->handler->thread!=NULL){ if (c->handler->thread(c)){ if (libaroma_control_draw(c,0)){ libaroma_wm_updatesync( c->x+win->x, c->y+win->y, c->w, c->h, 0 ); need_sync=1; } } } } } return need_sync; } /* End of _libaroma_window_ui_thread */ /* * Function : libaroma_window * Return Value: LIBAROMA_WINDOWP * Descriptions: new window */ LIBAROMA_WINDOWP libaroma_window( char * bg_theme_name, int x, int y, int w, int h ){ __CHECK_WM(NULL); LIBAROMA_WINDOWP win = (LIBAROMA_WINDOWP) calloc(sizeof(LIBAROMA_WINDOW),1); if (!win){ ALOGW("libaroma_window alloc window data failed"); return NULL; } if (bg_theme_name){ 
snprintf(win->theme_bg,256,"%s",bg_theme_name); } else{ win->theme_bg[0]=0; } win->rx = x; win->ry = y; win->rw = w; win->rh = h; win->ui_thread = _libaroma_window_ui_thread; libaroma_window_measure_size(win); return win; } /* End of libaroma_window */ /* * Function : libaroma_window_free * Return Value: byte * Descriptions: free window */ byte libaroma_window_free( LIBAROMA_WINDOWP win ){ __CHECK_WM(0); if (win==NULL){ return 0; } /* inactivate it */ if (win->parent==NULL){ if (libaroma_wm_get_active_window()==win){ /* detach active window from window manager */ libaroma_wm_set_active_window(NULL); } LIBAROMA_MSG _msg; libaroma_window_process_event(win, libaroma_wm_compose(&_msg, LIBAROMA_MSG_WIN_INACTIVE, NULL, 0, 0)); } if (win->handler!=NULL){ if (win->handler->prefree!=NULL){ win->handler->prefree(win); } } /* delete childs */ int i; if (win->childn>0){ #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ libaroma_control_free(win->childs[i]); } free(win->childs); } if (win->bg){ libaroma_canvas_free(win->bg); win->bg=NULL; } if (win->dc){ libaroma_canvas_free(win->dc); win->dc=NULL; } if (win->handler!=NULL){ if (win->handler->postfree!=NULL){ win->handler->postfree(win); } } free(win); return 1; } /* End of libaroma_window_free */ /* * Function : _libaroma_window_updatebg * Return Value: byte * Descriptions: update window background */ byte _libaroma_window_updatebg(LIBAROMA_WINDOWP win){ if (win==NULL){ ALOGW("window_recalculate win is NULL"); return 0; } if (win->handler!=NULL){ if (win->handler->updatebg!=NULL){ if (win->handler->updatebg(win)){ if (win->onupdatebg){ win->onupdatebg(win,win->bg); } return 1; } return 0; } } if (win->parent!=NULL){ return 0; } int w = win->w; int h = win->h; /* draw background */ if (win->bg!=NULL){ if ((win->bg->w==w)&&(win->bg->h==h)){ /* not need recreate background */ return 1; } libaroma_canvas_free(win->bg); } win->bg = libaroma_canvas(w,h); /* default canvas color */ 
libaroma_canvas_setcolor( win->bg, libaroma_colorget(NULL,win)->window_bg, 0xff ); /* from theme canvas */ if (win->theme_bg[0]!=0){ libaroma_wm_draw_theme( win->bg, win->theme_bg, 0, 0, win->bg->w, win->bg->h, NULL ); } /* from updatebg callback */ if (win->onupdatebg!=NULL){ win->onupdatebg(win,win->bg); } return 1; } /* End of _libaroma_window_updatebg */ /* * Function : _libaroma_window_recalculate * Return Value: byte * Descriptions: recalculate client size */ byte _libaroma_window_recalculate(LIBAROMA_WINDOWP win){ if (win==NULL){ ALOGW("window_recalculate win is NULL"); return 0; } if (libaroma_window_isactive(win)){ _libaroma_window_updatebg(win); libaroma_window_invalidate(win, 1); } return 1; } /* End of _libaroma_window_recalculate */ /* * Function : _libaroma_window_ready * Return Value: byte * Descriptions: window is ready */ byte _libaroma_window_ready(LIBAROMA_WINDOWP win){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_resize win is NULL"); return 0; } int x = win->x; int y = win->y; int w = win->w; int h = win->h; if (w==0){ w = libaroma_wm()->w; x = 0; } if (h==0){ h = libaroma_wm()->h; y = 0; } /* set position */ if (win->dc!=NULL){ libaroma_canvas_free(win->dc); win->dc=NULL; } win->dc= libaroma_wm_canvas(x, y, w, h); if (win->dc==NULL){ ALOGW("window_ready cannot allocate workspace drawing canvas"); return 0; } if (libaroma_window_isactive(win)){ libaroma_wm_clean_workspace(); } win->x = x; win->y = y; win->w = win->dc->w; win->h = win->dc->h; _libaroma_window_measure_save(win,NULL); _libaroma_window_recalculate(win); return 1; } /* End of _libaroma_window_ready */ /* * Function : libaroma_window_resize * Return Value: byte * Descriptions: resize window */ byte libaroma_window_resize( LIBAROMA_WINDOWP win, int x, int y, int w, int h ){ if (!win){ return 0; } if (win->parent!=NULL){ ALOGW("window_resize cannot be used for child window"); return 0; } win->rx = x; win->ry = y; win->rw = w; win->rh = h; if (libaroma_window_measure_size(win)){ return 
_libaroma_window_ready(win); } return 0; } /* End of libaroma_window_resize */ /* * Function : libaroma_window_isactive * Return Value: byte * Descriptions: check if window is active */ byte libaroma_window_isactive(LIBAROMA_WINDOWP win){ if (win!=NULL){ LIBAROMA_WINDOWP w = win; while(w->parent){ w=w->parent; } return ((w==libaroma_wm_get_active_window())?1:0); } return 0; } /* End of libaroma_window_isactive */ /* * Function : libaroma_window_add * Return Value: byte * Descriptions: add control into window */ byte libaroma_window_add( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl ){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_add win is NULL"); return 0; } if (ctl==NULL){ ALOGW("window_add ctl is NULL"); return 0; } if (ctl->window != NULL){ ALOGW("window_add ctl already have window"); return 0; } libaroma_window_measure(win, ctl); if (win->childn==0){ win->childs = (LIBAROMA_CONTROLP *) malloc(sizeof(LIBAROMA_CONTROLP)); if (!win->childs){ ALOGW("window_add malloc failed"); win->childs=NULL; return 0; } win->childs[0]=ctl; } else{ LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *) realloc(win->childs, sizeof(LIBAROMA_CONTROLP)*(win->childn+1)); if (!newchilds){ ALOGW("window_add realloc failed"); return 0; } win->childs = newchilds; win->childs[win->childn] = ctl; } ctl->window = win; win->childn++; _libaroma_window_recalculate(win); return 1; } /* End of libaroma_window_add */ /* * Function : libaroma_window_del * Return Value: byte * Descriptions: delete control from window */ byte libaroma_window_del( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl ){ __CHECK_WM(0); if (ctl==NULL){ ALOGW("window_del ctl is null"); return 0; } if (win==NULL){ ALOGW("window_del win is null"); return 0; } if (win != ctl->window){ return 0; } if (win->childn<=0){ ALOGW("window_del window data corrupt doesn't have childs??"); return 0; } else if (win->childn==1){ if (win->childs[0]==ctl){ ctl->window = NULL; win->childn=0; free(win->childs); win->childs=NULL; 
_libaroma_window_recalculate(win); return 1; } else{ ALOGW("window_del ctl not found in window"); return 0; } } LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *) malloc(sizeof(LIBAROMA_CONTROLP)*(win->childn-1)); if (!newchilds){ ALOGW("window_del malloc temp childs failed"); return 0; } int j = 0; int i; for (i=0;i<win->childn;i++){ if (win->childs[i]!=ctl){ if (j==win->childn-1){ /* current ctl not found */ free(newchilds); ALOGW("window_del ctl not found in window"); return 0; } newchilds[j++]=win->childs[i]; } } free(win->childs); win->childs=newchilds; win->childn--; _libaroma_window_recalculate(win); return 1; } /* End of libaroma_window_del */ /* * Function : libaroma_window_measure * Return Value: byte * Descriptions: measure control size */ byte libaroma_window_measure(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win&&ctl){ if (_libaroma_window_measurement_dp){ ctl->x = libaroma_dp(ctl->rx); ctl->y = libaroma_dp(ctl->ry); ctl->w = libaroma_dp(ctl->rw); ctl->h = libaroma_dp(ctl->rh); } else{ ctl->x = ctl->rx; ctl->y = ctl->ry; ctl->w = ctl->rw; ctl->h = ctl->rh; } ctl->x=libaroma_window_measure_calculate( ctl->x, ctl->rx, win->w, 0, 0 ); ctl->y=libaroma_window_measure_calculate( ctl->y, ctl->ry, win->h, 0, 0 ); ctl->w=libaroma_window_measure_calculate( ctl->w,ctl->rw, win->w, 1, ctl->x ); ctl->h=libaroma_window_measure_calculate( ctl->h,ctl->rh, win->h, 1, ctl->y ); if (ctl->w+ctl->x>win->w){ ctl->w = win->w-ctl->x; } if (ctl->h+ctl->y>win->h){ ctl->h = win->h-ctl->y; } if (ctl->w<ctl->minw){ ctl->w=ctl->minw; } if (ctl->h<ctl->minh){ ctl->h=ctl->minh; } _libaroma_window_measure_save(NULL,ctl); if (ctl->handler->message){ LIBAROMA_MSG _msg; ctl->handler->message(ctl, libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0) ); return 1; } } return 0; } /* End of libaroma_window_measure */ /* * Function : libaroma_window_attach * Return Value: LIBAROMA_CONTROLP * Descriptions: attach control into window */ LIBAROMA_CONTROLP 
libaroma_window_attach( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ /* attach into window */ if (win){ if (libaroma_window_add(win,ctl)){ return ctl; } ALOGW("window_attach cannot attach into window"); libaroma_control_free(ctl); return NULL; } return ctl; } /* End of libaroma_window_attach */ /* * Function : libaroma_window_getid * Return Value: LIBAROMA_CONTROLP * Descriptions: get control by id */ LIBAROMA_CONTROLP libaroma_window_getid( LIBAROMA_WINDOWP win, word id){ __CHECK_WM(NULL); if (win==NULL){ ALOGW("window_control_id win is null"); return NULL; } int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->id==id){ return win->childs[i]; } } return NULL; /* not found */ } /* End of libaroma_window_getid */ /* * Function : libaroma_window_setfocus * Return Value: LIBAROMA_CONTROLP * Descriptions: set control focus */ LIBAROMA_CONTROLP libaroma_window_setfocus( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win==NULL){ ALOGW("window_setfocus window is null"); return NULL; } if (ctl!=NULL){ /* set */ if (win!=ctl->window){ ALOGW("window_setfocus control is not window child"); return NULL; } if (ctl->handler->focus!=NULL){ if (win->focused==ctl){ return ctl; } if (ctl->handler->focus(ctl,1)){ if (win->focused){ win->focused->handler->focus(win->focused,0); } win->focused=ctl; return ctl; } } return NULL; } else{ /* find focus */ if (win->focused){ return win->focused; } int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->focus!=NULL){ return libaroma_window_setfocus(win,win->childs[i]); } } } return NULL; } /* End of libaroma_window_setfocus */ /* * Function : libaroma_window_sync * Return Value: byte * Descriptions: sync window canvas */ byte libaroma_window_sync(LIBAROMA_WINDOWP win, int x, int y, int w, int h){ __CHECK_WM(0); if (win==NULL){ ALOGW("libaroma_window_sync win is null"); return 0; } if (win->handler!=NULL){ if (win->handler->sync!=NULL){ return win->handler->sync(win,x,y,w,h); } } if (win->parent!=NULL){ return 0; } if 
(!win->lock_sync){ if (!libaroma_window_isactive(win)){ ALOGW("libaroma_window_sync win is not active window"); return 0; } if (win->dc==NULL){ ALOGW("window_invalidate dc is null"); return 0; } /* sync workspace */ libaroma_wm_sync(win->x+x,win->y+y,w,h); } return 1; } /* End of libaroma_window_sync */ /* * Function : libaroma_window_invalidate * Return Value: byte * Descriptions: invalidate window drawing */ byte libaroma_window_invalidate(LIBAROMA_WINDOWP win, byte sync){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_invalidate win is null"); return 0; } if (win->handler!=NULL){ if (win->handler->invalidate!=NULL){ return win->handler->invalidate(win,sync); } } if (win->parent!=NULL){ return 0; } if (!libaroma_window_isactive(win)){ ALOGW("window_invalidate win is not active window"); return 0; } if (win->dc==NULL){ ALOGW("window_invalidate dc is null"); return 0; } if ((!win->lock_sync)||(sync==10)){ /* draw bg */ libaroma_draw( win->dc, win->bg, 0, 0, 1); /* draw childs */ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ /* draw no sync */ libaroma_control_draw(win->childs[i], 0); } /* sync */ if (sync){ libaroma_window_sync(win, 0, 0, win->w, win->h); } } return 1; } /* End of libaroma_window_invalidate */ /* * Function : libaroma_window_anishow * Return Value: byte * Descriptions: show window - animated */ byte libaroma_window_anishow( LIBAROMA_WINDOWP win, byte animation, int duration){ __CHECK_WM(0); if (!win){ return 0; } if (win->parent!=NULL){ ALOGW("Child window cannot shown directly..."); return 0; } /* set initial focus libaroma_window_setfocus(win, NULL); */ if ((!animation)||(duration<50)){ return libaroma_wm_set_active_window(win); } /* lock and retval */ byte retval = 0; win->lock_sync = 1; if (libaroma_wm_set_active_window(win)){ win->active=2; /* draw window into temp canvas */ LIBAROMA_CANVASP wmc = win->dc; LIBAROMA_CANVASP tdc = libaroma_canvas(wmc->w,wmc->h); libaroma_draw(tdc,wmc,0,0,0); 
win->dc=tdc; /* switch dc */ LIBAROMA_CANVASP back = libaroma_canvas(wmc->w, wmc->h); libaroma_draw(back,wmc,0,0,0); /* invalidate now */ libaroma_window_invalidate(win, 10); long start = libaroma_tick(); int delta = 0; while ((delta=libaroma_tick()-start)<duration){ float state = ((float) delta)/((float) duration); if (state>=1.0){ break; } switch (animation){ case LIBAROMA_WINDOW_SHOW_ANIMATION_FADE: { float swift_out_state = libaroma_cubic_bezier_swiftout(state); float bstate = 255.0 * swift_out_state; byte bbstate = (byte) round(bstate); libaroma_draw_opacity( wmc, win->dc,0,0,0,bbstate ); //libaroma_window_sync(win, 0, 0, win->w, win->h); libaroma_wm_sync(win->x,win->y,win->w,win->h); } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_TOP: case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_TOP: { float swift_out_state = libaroma_cubic_bezier_swiftout(state); int y = win->h - (swift_out_state * win->h); int h = win->h - y; if (h>0){ if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_TOP){ if (h<win->h){ libaroma_draw_ex( wmc, back, 0, 0, 0, win->h - (win->h - h), win->w, win->h-h, 0, 0xff ); } } libaroma_draw_ex( wmc, win->dc, 0, y, 0, 0, win->w, h, 0, 0xff ); if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_TOP){ libaroma_wm_sync(win->x,win->y,win->w,win->h); } else{ libaroma_wm_sync(win->x,win->y+y,win->w, h); } } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_BOTTOM: case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_BOTTOM: { float swift_out_state = libaroma_cubic_bezier_swiftout(state); int y = 0 - (win->h - (swift_out_state * win->h)); int h = win->h + y; if (h>0){ if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_BOTTOM){ if (h<win->h){ libaroma_draw_ex( wmc, back, 0, h, 0, 0, win->w, win->h-h, 0, 0xff ); } } libaroma_draw_ex( wmc, win->dc, 0, 0, 0, win->h-h, win->w, h, 0, 0xff ); if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_BOTTOM){ libaroma_wm_sync(win->x,win->y,win->w,win->h); } else{ libaroma_wm_sync(win->x,win->y,win->w,h); } } } break; case 
LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_LEFT: case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_LEFT: { float swift_out_state = libaroma_cubic_bezier_swiftout(state); int x = win->w - (swift_out_state * win->w); int w = win->w - x; if (w>0){ if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_LEFT){ if (w<win->w){ libaroma_draw_ex( wmc, back, 0, 0, win->w - (win->w - w), 0, win->w - w, win->h, 0, 0xff ); } } libaroma_draw_ex( wmc, win->dc, x, 0, 0, 0, w, win->h, 0, 0xff ); if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_LEFT){ libaroma_wm_sync(win->x,win->y,win->w,win->h); } else{ libaroma_wm_sync(win->x+x,win->y,w, win->h); } } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_RIGHT: case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_RIGHT: { float swift_out_state = libaroma_cubic_bezier_swiftout(state); int x = 0 - (win->w - (swift_out_state * win->w)); int w = win->w + x; if (w>0){ if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_RIGHT){ if (w<win->w){ libaroma_draw_ex( wmc, back, w, 0, 0, 0, win->w - w, win->h, 0, 0xff ); } } libaroma_draw_ex( wmc, win->dc, 0, 0, win->w-w, 0, w, win->h, 0, 0xff ); if (animation==LIBAROMA_WINDOW_SHOW_ANIMATION_SLIDE_RIGHT){ libaroma_wm_sync(win->x,win->y,win->w,win->h); } else{ libaroma_wm_sync(win->x,win->y,w, win->h); } } } break; default: /* invalid animation */ start=0; break; } } retval = 1; libaroma_draw(wmc,win->dc,0,0,0); win->dc=wmc; /* switch dc back */ /* cleanup */ libaroma_canvas_free(back); libaroma_canvas_free(tdc); } win->lock_sync = 0; /* sync view now */ if (retval){ win->active=1; // libaroma_window_sync(win, 0, 0, win->w, win->h); libaroma_wm_sync(win->x,win->y,win->w,win->h); /* send activate */ LIBAROMA_MSG _msg; libaroma_window_process_event(win,libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_ACTIVE, NULL, 10, 0) ); } return retval; } /* End of libaroma_window_show */ /* * Function : libaroma_window_calculate_pos * Return Value: void * Descriptions: calculate screen position to window/control position */ void 
libaroma_window_calculate_pos( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl, int * x, int * y ){ if (win!=NULL){ *x-=win->x; *y-=win->y; } else if ((ctl!=NULL)&&(ctl->window!=NULL)){ *x-=ctl->window->x; *y-=ctl->window->y; } if (ctl!=NULL){ *x-=ctl->x; *y-=ctl->y; } /* *x-=libaroma_wm()->x; *y-=libaroma_wm()->y; */ } /* End of libaroma_window_calculate_pos */ /* * Function : libaroma_window_calculate_pos_abs * Return Value: void * Descriptions: calculate absolute screen position to top window position */ void libaroma_window_calculate_pos_abs( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl, int * x, int * y ){ if (ctl!=NULL){ *x-=ctl->x; *y-=ctl->y; win=ctl->window; } while (win!=NULL){ *x-=win->ax; *y-=win->ay; win=win->parent; } } /* End of libaroma_window_calculate_pos_abs */ /* * Function : _libaroma_window_is_inside * Return Value: byte * Descriptions: check position coordinate */ byte _libaroma_window_is_inside(LIBAROMA_CONTROLP ctl, int x, int y) { int wx = ctl->x; int wx2 = wx + ctl->w; int wy = ctl->y; int wy2 = wy + ctl->h; if ((x >= wx) && (x < wx2) && (y >= wy) && (y < wy2)) { return 1; } return 0; } /* End of _libaroma_window_is_inside */ /* * Function : libaroma_window_post_command * Return Value: byte * Descriptions: post direct command */ byte libaroma_window_post_command(dword cmd){ return libaroma_msg_post( LIBAROMA_MSG_WIN_DIRECTMSG, 0, 0, (int) cmd, 0, NULL ); } /* End of libaroma_window_post_command */ /* * Function : libaroma_window_post_command_ex * Return Value: byte * Descriptions: post direct command extended */ byte libaroma_window_post_command_ex(dword cmd, byte state, int key, int y, voidp d){ return libaroma_msg_post( LIBAROMA_MSG_WIN_DIRECTMSG, state, key, (int) cmd, y, d ); } /* End of libaroma_window_post_command */ /* * Function : libaroma_window_process_event * Return Value: dword * Descriptions: process message */ dword libaroma_window_process_event(LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){ __CHECK_WM(0); if (win==NULL){ 
ALOGW("window_event win is null"); return 0; } if (win->parent!=NULL){ ALOGW("window_event cannot used for child window..."); return 0; } dword ret = 0; if (win->handler){ if (win->handler->message_hooker){ if (win->handler->message_hooker(win,msg,&ret)){ return ret; } } } switch (msg->msg){ case LIBAROMA_MSG_WIN_ACTIVE: { /* set current window size */ win->focused=NULL; win->touched=NULL; if (msg->x!=10){ _libaroma_window_ready(win); } if ((!win->lock_sync)||(msg->x==10)){ if ((!win->active)||(msg->x==10)){ int i; win->active=1; /* signal child */ for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } } } break; case LIBAROMA_MSG_WIN_RESIZE: { int i; _libaroma_window_ready(win); for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } break; case LIBAROMA_MSG_WIN_INACTIVE: { if (win->active){ /* stop thread manager */ win->active=0; /* send inactive message to child */ int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } win->focused=NULL; win->touched=NULL; } } break; case LIBAROMA_MSG_WIN_MEASURED: { /* remeasured all childs */ int i; for (i=0;i<win->childn;i++){ libaroma_window_measure(win,win->childs[i]); } } break; case LIBAROMA_MSG_WIN_DIRECTMSG: { return (dword) msg->x; } break; case LIBAROMA_MSG_WIN_INVALIDATE: { libaroma_window_invalidate(win, 1); } break; case LIBAROMA_MSG_TOUCH: { /* touch handler */ if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){ win->touched = NULL; int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win,NULL,&x,&y); int i; for (i=0;i<win->childn;i++){ if (_libaroma_window_is_inside(win->childs[i],x,y)){ win->touched = win->childs[i]; break; } } if (win->touched!=NULL){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } } } else if (win->touched!=NULL){ if 
(msg->state==LIBAROMA_HID_EV_STATE_MOVE){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } } else if (msg->state==LIBAROMA_HID_EV_STATE_UP){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } win->touched=NULL; } } } break; } return ret; } /* End of libaroma_window_process_event */ /* * Function : libaroma_window_pool * Return Value: dword * Descriptions: poll window messages */ dword libaroma_window_pool( LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){ if (!win){ return 0; } if (win->parent!=NULL){ ALOGW("cannot pool child window..."); return 0; } LIBAROMA_MSG _msg; LIBAROMA_MSGP cmsg=(msg!=NULL)?msg:&_msg; byte ret = libaroma_wm_getmessage(cmsg); if (ret){ return libaroma_window_process_event(win,cmsg); } return 0; } /* End of libaroma_window_pool */ #undef __CHECK_WM #endif /* __libaroma_window_c__ */
wrapfftw.c
#include <stdio.h> #include <stdlib.h> #include "hpccfft.h" #ifdef _OPENMP #include <omp.h> #endif hpcc_fftw_plan HPCC_fftw_create_plan(int n, fftw_direction dir, int flags) { hpcc_fftw_plan p; fftw_complex *a = NULL, *b = NULL; p = (hpcc_fftw_plan)fftw_malloc( sizeof *p ); if (! p) return p; p->w1 = (fftw_complex *)fftw_malloc( (FFTE_NDA2/2 + FFTE_NP) * (sizeof *p->w1) ); p->w2 = (fftw_complex *)fftw_malloc( (FFTE_NDA2/2 + FFTE_NP) * (sizeof *p->w2) ); p->ww = (fftw_complex *)fftw_malloc( ((FFTE_NDA2+FFTE_NP) * 4 + FFTE_NP) * (sizeof *p->ww) ); p->c_size = (FFTE_NDA2+FFTE_NP) * (FFTE_NBLK + 1) + FFTE_NP; #ifdef _OPENMP #pragma omp parallel { #pragma omp single { int i; i = omp_get_num_threads(); p->c = (fftw_complex *)fftw_malloc( p->c_size * (sizeof *p->c) * i ); } } #else p->c = (fftw_complex *)fftw_malloc( p->c_size * (sizeof *p->c) ); #endif if (! p->w1 || ! p->w2 || ! p->ww || ! p->c) { if (p->c) fftw_free( p->c ); if (p->ww) fftw_free( p->ww ); if (p->w2) fftw_free( p->w2 ); if (p->w1) fftw_free( p->w1 ); fftw_free( p ); return NULL; } HPCC_zfft1d( n, a, b, 0, p ); p->n = n; p->dir = dir; p->flags = flags; return p; } void HPCC_fftw_destroy_plan(hpcc_fftw_plan p) { if (! p) return; fftw_free( p->c ); fftw_free( p->ww ); fftw_free( p->w2 ); fftw_free( p->w1 ); fftw_free( p ); } /* Without additional storage of size p->n there is no way to preserve FFTW 2 semantics (the `in' vector is not modified). But it doesn't matter for the calling code: it doesn't rely on this semantics. The change in semantics occured while going from FFTE 3.3 to FFTE 4.0. */ void HPCC_fftw_one(hpcc_fftw_plan p, fftw_complex *in, fftw_complex *out) { int i, n; if (FFTW_FORWARD == p->dir) HPCC_zfft1d( p->n, in, out, -1, p ); else HPCC_zfft1d( p->n, in, out, +1, p ); n = p->n; /* Copy the transform to `out' vector. */ for (i = 0; i < n; ++i) { c_assgn( out[i], in[i] ); } }
V3_openmp.c
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <pthread.h> #include <omp.h> #include <time.h> #include "../lib/mmio.h" #include "../lib/triangles_library.h" //V3 ALGORITHM USING OPENMP void V3_algorithm_openmp(uint32_t *csc_col,uint32_t *csc_row, uint32_t *c3, int n); int main(int argc, char *argv[]) { //#################Read the sparse matrix from file.################# int ret_code; MM_typecode matcode; FILE *f; int M, N, nz; if (argc < 2){ fprintf(stderr, "Usage: %s [martix-market-filename]\n", argv[0]); exit(1); }else{ if ((f = fopen(argv[1], "r")) == NULL){ printf("ERROR: Cannot process the file\n" ); exit(1); } } if (mm_read_banner(f, &matcode) != 0) { printf("Could not process Matrix Market banner.\n"); exit(1); } if (mm_is_complex(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode) ) { printf("Sorry, this application does not support "); printf("Market Market type: [%s]\n", mm_typecode_to_str(matcode)); exit(1); } /* find out size of sparse matrix .... */ if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) !=0){ printf("Sorry, this application does not support "); exit(1); } /* reseve memory for matrices */ uint32_t *I, *J; I = (uint32_t *) malloc(2*nz * sizeof(uint32_t)); J = (uint32_t *) malloc(2*nz * sizeof(uint32_t)); /*read the market matrix file*/ for (uint32_t i=0; i<2*nz; i=i+2) { if(fscanf(f, "%u %u \n", &I[i], &J[i])); /* adjust from 1-based to 0-based */ I[i]--; J[i]--; //create the symmetric matrix. 
I[i+1]=J[i]; J[i+1]=I[i]; } if (f !=stdin) fclose(f); //#################CSC FORMAT FOR THE SPARSE MATRIX################# uint32_t *csc_col= (uint32_t *)malloc((N + 1) * sizeof(uint32_t)); uint32_t *csc_row= (uint32_t *)malloc(2*nz * sizeof(uint32_t)); uint32_t *c3 = (uint32_t *)malloc(N*sizeof(uint32_t)); coo2csc(csc_row, csc_col, I, J, (uint32_t)2*nz, (uint32_t)N,0); //initialize c3's matrices for(int i = 0;i<N;i++){ c3[i]=0; } /*timespec variables to count the total time of execution */ struct timespec ts_start; struct timespec ts_end; printf("====================PARALLEL V3 ALGORITHM USING OPENMP==================== \n"); clock_gettime(CLOCK_MONOTONIC, &ts_start); if(N<100){ printf("FEW NODES THUS SEQUENTIAL ALGORITHM IS SELECTED \n"); V3_algorithm(csc_col,csc_row,c3,N); } else V3_algorithm_openmp(csc_col,csc_row,c3,N); clock_gettime(CLOCK_MONOTONIC, &ts_end); //#######################WRITE RESULTS TO FILE AND EXIT####################### char str[200]; snprintf(str,sizeof(str),"V3_OPENMP.txt"); double time = 1000000*(double)(ts_end.tv_sec-ts_start.tv_sec)+(double)(ts_end.tv_nsec-ts_start.tv_nsec)/1000; write_to_file(str,c3,N,time); printf("RESULTS HAVE BEEN WRITTEN\n"); printf("EXITING...\n"); free(c3); free(csc_col); free(csc_row); free(I); free(J); return 0; } //V3 ALGORITHM USING OPENMP void V3_algorithm_openmp(uint32_t *csc_col,uint32_t *csc_row, uint32_t *c3, int n){ uint32_t i,j,k; uint32_t temp1,temp2; //parallel implementation of V3 algorithm. //set number of threads. 
Better dont set it manually //int n_threads; //omp_set_dynamic(0); //omp_set_num_threads(16); #pragma omp parallel shared(csc_col,csc_row,c3) private(i,j,k,temp1,temp2) firstprivate(n) { #pragma omp for schedule(auto) nowait for (i = 0;i<n;i++){ //n_threads = omp_get_num_threads(); //printf("NUMBER OF THREADS n:%d \n",n_threads); for(temp1 = 0;temp1<csc_col[i+1]-csc_col[i];temp1++){ j = csc_row[csc_col[i]+temp1]; if(j<i+1)continue; for(temp2=0;temp2<csc_col[j+1]-csc_col[j];temp2++){ k = csc_row[csc_col[j]+temp2]; if(k<j+1)continue; if(binary_search(k,i,temp1,csc_col,csc_row) !=0 ){ #pragma omp critical { c3[i]++; c3[j]++; c3[k]++; } } } } } } }
computepi.c
#include "computepi.h"
#include <immintrin.h>
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <stdio.h>

/* Riemann-sum integration of 4/(1+x^2) over [0,1]. */
double compute_pi_baseline(size_t N)
{
    double pi = 0.0;
    double dt = 1.0 / N;  // dt = (b-a)/N, b = 1, a = 0
    for (size_t i = 0; i < N; i++) {
        double x = (double) i / N;  // x = ti = a+(b-a)*i/N = i/N
        pi += dt / (1.0 + x * x);   // integrate 1/(1+x^2), i = 0....N
    }
    return pi * 4.0;
}

/* Same integral, parallelized with an OpenMP reduction over `threads` threads. */
double compute_pi_openmp(size_t N, int threads)
{
    double pi = 0.0;
    double dt = 1.0 / N;
    double x;
#pragma omp parallel num_threads(threads)
    {
        /**
         * for : Causes the work done in a for loop inside a parallel region to
         * be divided among threads.
         * num_threads : Sets the number of threads in a thread team.
         * private : Specifies that each thread should have its own instance of
         * a variable.
         * reduction : Specifies that one or more variables that are private to
         * each thread are the subject of a reduction operation at the end of
         * the parallel region.
         */
#pragma omp for private(x) reduction(+ : pi)
        for (size_t i = 0; i < N; i++) {
            x = (double) i / N;
            pi += dt / (1.0 + x * x);
        }
    }
    return pi * 4.0;
}

/* AVX version: 4 integration terms per iteration.
 * NOTE(review): like the original, any tail of N % 4 terms is skipped. */
double compute_pi_avx(size_t N)
{
    double pi = 0.0;
    double dt = 1.0 / N;
    register __m256d ymm0, ymm1, ymm2, ymm3, ymm4;
    ymm0 = _mm256_set1_pd(1.0);
    ymm1 = _mm256_set1_pd(dt);
    ymm2 = _mm256_set_pd(dt * 3, dt * 2, dt * 1, 0.0);
    ymm4 = _mm256_setzero_pd();  // sum of pi

    /* BUGFIX: the loop bound was `int i <= N - 4` with size_t N, so for
       N < 4 the unsigned subtraction wrapped to a huge value and `i`
       eventually overflowed (UB).  `i + 4 <= N` is the safe equivalent. */
    for (size_t i = 0; i + 4 <= N; i += 4) {
        ymm3 = _mm256_set1_pd(i * dt);     // i*dt, i*dt, i*dt, i*dt
        ymm3 = _mm256_add_pd(ymm3, ymm2);  // x = i*dt+3*dt, i*dt+2*dt, i*dt+dt, i*dt+0.0
        ymm3 = _mm256_mul_pd(ymm3, ymm3);  // x^2
        ymm3 = _mm256_add_pd(ymm0, ymm3);  // 1+x^2
        ymm3 = _mm256_div_pd(ymm1, ymm3);  // dt/(1+x^2)
        ymm4 = _mm256_add_pd(ymm4, ymm3);  // pi += dt/(1+x^2)
    }
    double tmp[4] __attribute__((aligned(32)));
    _mm256_store_pd(tmp, ymm4);  // move packed float64 values to 32-byte aligned memory
    pi += tmp[0] + tmp[1] + tmp[2] + tmp[3];
    return pi * 4.0;
}

/* AVX version unrolled 4x (16 terms per iteration, four accumulators
 * to hide division latency).  Tail of N % 16 terms is skipped, as before. */
double compute_pi_avx_unroll(size_t N)
{
    double pi = 0.0;
    double dt = 1.0 / N;
    register __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7, ymm8,
        ymm9, ymm10, ymm11, ymm12, ymm13, ymm14;
    ymm0 = _mm256_set1_pd(1.0);
    ymm1 = _mm256_set1_pd(dt);
    ymm2 = _mm256_set_pd(dt * 3, dt * 2, dt * 1, 0.0);
    ymm3 = _mm256_set_pd(dt * 7, dt * 6, dt * 5, dt * 4);
    ymm4 = _mm256_set_pd(dt * 11, dt * 10, dt * 9, dt * 8);
    ymm5 = _mm256_set_pd(dt * 15, dt * 14, dt * 13, dt * 12);
    ymm6 = _mm256_setzero_pd();  // first sum of pi
    ymm7 = _mm256_setzero_pd();  // second sum of pi
    ymm8 = _mm256_setzero_pd();  // third sum of pi
    ymm9 = _mm256_setzero_pd();  // fourth sum of pi

    /* BUGFIX: overflow-safe loop bound (see compute_pi_avx) */
    for (size_t i = 0; i + 16 <= N; i += 16) {
        ymm14 = _mm256_set1_pd(i * dt);
        ymm10 = _mm256_add_pd(ymm14, ymm2);
        ymm11 = _mm256_add_pd(ymm14, ymm3);
        ymm12 = _mm256_add_pd(ymm14, ymm4);
        ymm13 = _mm256_add_pd(ymm14, ymm5);
        ymm10 = _mm256_mul_pd(ymm10, ymm10);
        ymm11 = _mm256_mul_pd(ymm11, ymm11);
        ymm12 = _mm256_mul_pd(ymm12, ymm12);
        ymm13 = _mm256_mul_pd(ymm13, ymm13);
        ymm10 = _mm256_add_pd(ymm0, ymm10);
        ymm11 = _mm256_add_pd(ymm0, ymm11);
        ymm12 = _mm256_add_pd(ymm0, ymm12);
        ymm13 = _mm256_add_pd(ymm0, ymm13);
        ymm10 = _mm256_div_pd(ymm1, ymm10);
        ymm11 = _mm256_div_pd(ymm1, ymm11);
        ymm12 = _mm256_div_pd(ymm1, ymm12);
        ymm13 = _mm256_div_pd(ymm1, ymm13);
        ymm6 = _mm256_add_pd(ymm6, ymm10);
        ymm7 = _mm256_add_pd(ymm7, ymm11);
        ymm8 = _mm256_add_pd(ymm8, ymm12);
        ymm9 = _mm256_add_pd(ymm9, ymm13);
    }
    double tmp1[4] __attribute__((aligned(32)));
    double tmp2[4] __attribute__((aligned(32)));
    double tmp3[4] __attribute__((aligned(32)));
    double tmp4[4] __attribute__((aligned(32)));
    _mm256_store_pd(tmp1, ymm6);
    _mm256_store_pd(tmp2, ymm7);
    _mm256_store_pd(tmp3, ymm8);
    _mm256_store_pd(tmp4, ymm9);
    pi += tmp1[0] + tmp1[1] + tmp1[2] + tmp1[3] + tmp2[0] + tmp2[1] + tmp2[2] +
          tmp2[3] + tmp3[0] + tmp3[1] + tmp3[2] + tmp3[3] + tmp4[0] + tmp4[1] +
          tmp4[2] + tmp4[3];
    return pi * 4.0;
}

/* Leibniz series: pi/4 = sum (-1)^i / (2i+1). */
double compute_pi_leibniz(size_t N)
{
    double pi = 0.0;
    for (size_t i = 0; i < N; i++) {
        double tmp = (i & 1) ? (-1) : 1;
        pi += tmp / (2 * i + 1);
    }
    return pi * 4.0;
}

/* Leibniz series with an OpenMP reduction. */
double compute_pi_leibniz_openmp(size_t N, int threads)
{
    double pi = 0.0;
#pragma omp parallel for num_threads(threads) reduction(+ : pi)
    for (size_t i = 0; i < N; i++) {
        double tmp = (i & 1) ? -1 : 1;
        pi += ((double) tmp) / (2 * i + 1);
    }
    return pi * 4.0;
}

/* Leibniz series, AVX, 4 terms per iteration.  The sign vector encodes
 * (-1)^n for lanes n+3..n (note _mm256_set_pd fills high-to-low). */
double compute_pi_leibniz_avx(size_t N)
{
    double pi = 0.0;
    register __m256d ymm0, ymm1, ymm2, ymm3, ymm4;
    ymm0 = _mm256_set_pd(1.0, -1.0, 1.0, -1.0);
    ymm1 = _mm256_set1_pd(1.0);
    ymm2 = _mm256_set1_pd(2.0);
    ymm4 = _mm256_setzero_pd();  // calculation result

    /* BUGFIX: overflow-safe loop bound (see compute_pi_avx) */
    for (size_t i = 0; i + 4 <= N; i += 4) {
        ymm3 = _mm256_set_pd((double) i, i + 1.0, i + 2.0, i + 3.0);
        ymm3 = _mm256_mul_pd(ymm3, ymm2);  // 2*i
        ymm3 = _mm256_add_pd(ymm3, ymm1);  // 2*i+1
        ymm3 = _mm256_div_pd(ymm0, ymm3);  // (-1)^n / (2*i+1)
        ymm4 = _mm256_add_pd(ymm4, ymm3);
    }
    double tmp[4] __attribute__((aligned(32)));
    _mm256_store_pd(tmp, ymm4);  // move packed float64 values to 32-byte aligned memory
    pi += tmp[0] + tmp[1] + tmp[2] + tmp[3];
    return pi * 4.0;
}

/* Leibniz series, AVX, unrolled 4x (16 terms per iteration). */
double compute_pi_leibniz_avx_unroll(size_t N)
{
    double pi = 0.0;
    register __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7, ymm8,
        ymm9, ymm10;
    ymm0 = _mm256_set_pd(1.0, -1.0, 1.0, -1.0);
    ymm1 = _mm256_set1_pd(1.0);
    ymm2 = _mm256_set1_pd(2.0);
    ymm7 = _mm256_setzero_pd();   // first sum of pi
    ymm8 = _mm256_setzero_pd();   // second sum of pi
    ymm9 = _mm256_setzero_pd();   // third sum of pi
    ymm10 = _mm256_setzero_pd();  // fourth sum of pi

    /* BUGFIX: overflow-safe loop bound (see compute_pi_avx) */
    for (size_t i = 0; i + 16 <= N; i += 16) {
        ymm3 = _mm256_set_pd((double) i, i + 1.0, i + 2.0, i + 3.0);
        ymm4 = _mm256_set_pd(i + 4.0, i + 5.0, i + 6.0, i + 7.0);
        ymm5 = _mm256_set_pd(i + 8.0, i + 9.0, i + 10.0, i + 11.0);
        ymm6 = _mm256_set_pd(i + 12.0, i + 13.0, i + 14.0, i + 15.0);
        ymm3 = _mm256_mul_pd(ymm3, ymm2);  // 2*i
        ymm4 = _mm256_mul_pd(ymm4, ymm2);
        ymm5 = _mm256_mul_pd(ymm5, ymm2);
        ymm6 = _mm256_mul_pd(ymm6, ymm2);
        ymm3 = _mm256_add_pd(ymm3, ymm1);  // 2*i+1
        ymm4 = _mm256_add_pd(ymm4, ymm1);
        ymm5 = _mm256_add_pd(ymm5, ymm1);
        ymm6 = _mm256_add_pd(ymm6, ymm1);
        ymm3 = _mm256_div_pd(ymm0, ymm3);  // (-1)^n / (2*i+1)
        ymm4 = _mm256_div_pd(ymm0, ymm4);
        ymm5 = _mm256_div_pd(ymm0, ymm5);
        ymm6 = _mm256_div_pd(ymm0, ymm6);
        ymm7 = _mm256_add_pd(ymm7, ymm3);
        ymm8 = _mm256_add_pd(ymm8, ymm4);
        ymm9 = _mm256_add_pd(ymm9, ymm5);
        ymm10 = _mm256_add_pd(ymm10, ymm6);
    }
    double tmp1[4] __attribute__((aligned(32)));
    double tmp2[4] __attribute__((aligned(32)));
    double tmp3[4] __attribute__((aligned(32)));
    double tmp4[4] __attribute__((aligned(32)));
    _mm256_store_pd(tmp1, ymm7);
    _mm256_store_pd(tmp2, ymm8);
    _mm256_store_pd(tmp3, ymm9);
    _mm256_store_pd(tmp4, ymm10);
    pi += tmp1[0] + tmp1[1] + tmp1[2] + tmp1[3] + tmp2[0] + tmp2[1] + tmp2[2] +
          tmp2[3] + tmp3[0] + tmp3[1] + tmp3[2] + tmp3[3] + tmp4[0] + tmp4[1] +
          tmp4[2] + tmp4[3];
    return pi * 4.0;
}

/* Euler/Basel series: pi^2/6 = sum 1/i^2 (note: starts at i = 1). */
double compute_pi_euler(size_t N)
{
    double pi = 0.0;
    for (size_t i = 1; i < N; i++) {
        pi += 1.0 / (i * i);
    }
    return sqrt(pi * 6);
}

/* Euler series with an OpenMP reduction. */
double compute_pi_euler_openmp(size_t N, int threads)
{
    double pi = 0.0;
#pragma omp parallel for num_threads(threads) reduction(+ : pi)
    for (size_t i = 1; i < N; i++) {
        pi += 1.0 / (i * i);
    }
    return sqrt(pi * 6);
}

/* Euler series, AVX; the factor 6 is folded into the vector loop, so the
 * final result is just sqrt(accumulator). */
double compute_pi_euler_avx(size_t N)
{
    double pi = 0.0;
    register __m256d ymm0, ymm1, ymm2, ymm3;
    ymm0 = _mm256_set1_pd(1.0);
    ymm1 = _mm256_set1_pd(6.0);
    ymm3 = _mm256_setzero_pd();  // calculation result

    /* BUGFIX: overflow-safe loop bound; `i + 4 <= N` == old `i <= N - 4` */
    for (size_t i = 1; i + 4 <= N; i += 4) {
        ymm2 = _mm256_set_pd((double) i, i + 1.0, i + 2.0, i + 3.0);
        ymm2 = _mm256_mul_pd(ymm2, ymm2);  // i*i
        ymm2 = _mm256_div_pd(ymm0, ymm2);  // 1/(i*i)
        ymm2 = _mm256_mul_pd(ymm1, ymm2);  // 6/(i*i)
        ymm3 = _mm256_add_pd(ymm3, ymm2);
    }
    double tmp[4] __attribute__((aligned(32)));
    _mm256_store_pd(tmp, ymm3);  // move packed float64 values to 32-byte aligned memory
    pi += tmp[0] + tmp[1] + tmp[2] + tmp[3];
    return sqrt(pi);
}

/* Euler series, AVX, unrolled 4x. */
double compute_pi_euler_avx_unroll(size_t N)
{
    double pi = 0.0;
    register __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7, ymm8, ymm9;
    ymm0 = _mm256_set1_pd(1.0);
    ymm1 = _mm256_set1_pd(6.0);
    ymm6 = _mm256_setzero_pd();  // first sum of pi
    ymm7 = _mm256_setzero_pd();  // second sum of pi
    ymm8 = _mm256_setzero_pd();  // third sum of pi
    ymm9 = _mm256_setzero_pd();  // fourth sum of pi

    /* BUGFIX: overflow-safe loop bound (see compute_pi_euler_avx) */
    for (size_t i = 1; i + 16 <= N; i += 16) {
        ymm2 = _mm256_set_pd((double) i, i + 1.0, i + 2.0, i + 3.0);
        ymm3 = _mm256_set_pd(i + 4.0, i + 5.0, i + 6.0, i + 7.0);
        ymm4 = _mm256_set_pd(i + 8.0, i + 9.0, i + 10.0, i + 11.0);
        ymm5 = _mm256_set_pd(i + 12.0, i + 13.0, i + 14.0, i + 15.0);
        ymm2 = _mm256_mul_pd(ymm2, ymm2);  // i*i
        ymm3 = _mm256_mul_pd(ymm3, ymm3);
        ymm4 = _mm256_mul_pd(ymm4, ymm4);
        ymm5 = _mm256_mul_pd(ymm5, ymm5);
        ymm2 = _mm256_div_pd(ymm0, ymm2);  // 1/(i*i)
        ymm3 = _mm256_div_pd(ymm0, ymm3);
        ymm4 = _mm256_div_pd(ymm0, ymm4);
        ymm5 = _mm256_div_pd(ymm0, ymm5);
        ymm2 = _mm256_mul_pd(ymm1, ymm2);  // 6/(i*i)
        ymm3 = _mm256_mul_pd(ymm1, ymm3);
        ymm4 = _mm256_mul_pd(ymm1, ymm4);
        ymm5 = _mm256_mul_pd(ymm1, ymm5);
        ymm6 = _mm256_add_pd(ymm6, ymm2);
        ymm7 = _mm256_add_pd(ymm7, ymm3);
        ymm8 = _mm256_add_pd(ymm8, ymm4);
        ymm9 = _mm256_add_pd(ymm9, ymm5);
    }
    double tmp1[4] __attribute__((aligned(32)));
    double tmp2[4] __attribute__((aligned(32)));
    double tmp3[4] __attribute__((aligned(32)));
    double tmp4[4] __attribute__((aligned(32)));
    _mm256_store_pd(tmp1, ymm6);
    _mm256_store_pd(tmp2, ymm7);
    _mm256_store_pd(tmp3, ymm8);
    _mm256_store_pd(tmp4, ymm9);
    pi += tmp1[0] + tmp1[1] + tmp1[2] + tmp1[3] + tmp2[0] + tmp2[1] + tmp2[2] +
          tmp2[3] + tmp3[0] + tmp3[1] + tmp3[2] + tmp3[3] + tmp4[0] + tmp4[1] +
          tmp4[2] + tmp4[3];
    return sqrt(pi);
}

/* Chudnovsky-series constants (kept for source compatibility; the
 * functions below use the literals directly). */
static double A = 13591409.0f, B = 545140134.0f, C = 640320.0f;

/* Binary-splitting helper for the Chudnovsky series over terms [n1, n2). */
pqt_t compute_pi_Chud_aux(size_t n1, size_t n2)
{
    double an = pow(-1, n2 % 2) * (13591409.0f + 545140134.0f * n2);
    double pn = (2 * n2 - 1) * (6 * n2 - 5) * (6 * n2 - 1);
    double qn = n2 * n2 * n2 * 640320.0f * 640320.0f * 640320.0f / 24.0f;
    pqt_t rst = {.p = pn, .q = qn, .t = an * pn};
    if (n1 + 1 == n2)
        return rst;
    else {
        size_t m = (n1 + n2) / 2;
        pqt_t t1 = compute_pi_Chud_aux(n1, m);
        pqt_t t2 = compute_pi_Chud_aux(m, n2);
        rst.p = t1.p * t2.p;
        rst.q = t1.q * t2.q;
        rst.t = t1.t * t2.q + t1.p * t2.t;
        return rst;
    }
}

/* BUGFIX: this function had an empty body while declared to return pqt_t --
 * calling it and using the result was undefined behavior.  Until an
 * unrolled variant is actually implemented, delegate to the reference
 * implementation, which returns the identical result. */
pqt_t compute_pi_Chud_aux_unroll(size_t n1, size_t n2)
{
    return compute_pi_Chud_aux(n1, n2);
}

/* Chudnovsky algorithm driver (debug print kept from the original). */
double compute_pi_Chud(size_t N)
{
    pqt_t rst = compute_pi_Chud_aux(0, N);
    printf("%lf %lf %lf", rst.p, rst.q, rst.t);
    return rst.q / (12 * rst.t + 12 * 13591409.0f * rst.q) *
           pow(640320.0f, 1.5f);
}
GB_unaryop__abs_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_fp32 // op(A') function: GB_tran__abs_bool_fp32 // C type: bool // A type: float // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_fp32 ( bool *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Cylinder.h
#ifndef CYLINDER_HEADER
#define CYLINDER_HEADER
#include "basic.h"
#include <stdexcept>
#include <utility>
#include <MiscLib/Vector.h>
#include "PointCloud.h"
#include <ostream>
#include <istream>
#include <GfxTL/HyperplaneCoordinateSystem.h>
#include <stdio.h>
#include <MiscLib/NoShrinkVector.h>
#include "LevMarLSWeight.h"
#include "LevMarFitting.h"

#ifndef DLL_LINKAGE
#define DLL_LINKAGE
#endif

namespace schnabel {

// Infinite cylinder primitive, described by an axis direction
// (m_axisDir), a point on the axis (m_axisPos) and a radius.  Provides
// distance/normal queries, ray intersection, serialization and
// Levenberg-Marquardt least-squares refinement.
class DLL_LINKAGE Cylinder
{
public:
	// thrown when a cylinder cannot be constructed from two point/normal
	// pairs because the normals are (nearly) parallel
	struct ParallelNormalsError
	: public std::runtime_error
	{
		ParallelNormalsError();
	};
	enum { RequiredSamples = 2 };
	Cylinder();
	Cylinder(const Vec3f &axisDir, const Vec3f &axisPos, float radius);
	Cylinder(const Vec3f &pointA, const Vec3f &pointB,
		const Vec3f &normalA, const Vec3f &normalB);
	bool Init(const MiscLib::Vector< Vec3f > &samples);
	bool InitAverage(const MiscLib::Vector< Vec3f > &samples);
	bool Init(const Vec3f &axisDir, const Vec3f &axisPos, float radius);
	bool Init(const Vec3f &pointA, const Vec3f &pointB,
		const Vec3f &normalA, const Vec3f &normalB);
	bool Init(bool binary, std::istream *i);
	void Init(FILE *i);
	void Init(float* array);
	inline float Distance(const Vec3f &p) const;
	inline void Normal(const Vec3f &p, Vec3f *normal) const;
	inline float DistanceAndNormal(const Vec3f &p, Vec3f *normal) const;
	inline float SignedDistance(const Vec3f &p) const;
	void Project(const Vec3f &p, Vec3f *pp) const;
	// parameters are (height, angle)
	void Parameters(const Vec3f &p,
		std::pair< float, float > *param) const;
	float Radius() const;
	float &Radius();
	const Vec3f &AxisDirection() const;
	Vec3f &AxisDirection();
	const Vec3f &AxisPosition() const;
	Vec3f &AxisPosition();
	const Vec3f AngularDirection() const;
	void RotateAngularDirection(float radians);
	bool LeastSquaresFit(const PointCloud &pc,
		MiscLib::Vector< size_t >::const_iterator begin,
		MiscLib::Vector< size_t >::const_iterator end);
	template< class IteratorT >
	bool LeastSquaresFit(IteratorT begin, IteratorT end);
	// generic fitting entry point; simply forwards to LeastSquaresFit
	bool Fit(const PointCloud &pc,
		MiscLib::Vector< size_t >::const_iterator begin,
		MiscLib::Vector< size_t >::const_iterator end)
	{ return LeastSquaresFit(pc, begin, end); }
	static bool Interpolate(const MiscLib::Vector< Cylinder > &cylinders,
		const MiscLib::Vector< float > &weights, Cylinder *ic);
	void Serialize(bool binary, std::ostream *o) const;
	static size_t SerializedSize();
	void Serialize(FILE *o) const;
	void Serialize(float* array) const;
	static size_t SerializedFloatSize();
	void Transform(float scale, const Vec3f &translate);
	void Transform(const GfxTL::MatrixXX< 3, 3, float > &rot,
		const GfxTL::Vector3Df &trans);
	inline unsigned int Intersect(const Vec3f &p, const Vec3f &r,
		float *first, float *second) const;

private:
	// Levenberg-Marquardt cost functor for cylinder refinement.
	// Parameter layout: params[0..2] = axis position, params[3..5] = axis
	// direction (unit), params[6] = radius.
	template< class WeightT >
	class LevMarCylinder
	: public WeightT
	{
	public:
		enum { NumParams = 7 };
		typedef float ScalarType;

		// Sum of squared (weighted) residuals; temp[idx] receives the
		// point-to-axis distance, values[idx] the weighted residual.
		template< class IteratorT >
		ScalarType Chi(const ScalarType *params, IteratorT begin, IteratorT end,
			ScalarType *values, ScalarType *temp) const
		{
			ScalarType chi = 0;
			int size = end - begin;
#pragma omp parallel for schedule(static) reduction(+:chi)
			for(int idx = 0; idx < size; ++idx)
			{
				Vec3f s;
				for(unsigned int j = 0; j < 3; ++j)
					s[j] = begin[idx][j] - params[j];
				// |(p - pos) x dir| computed componentwise = distance to axis
				ScalarType u = params[5] * s[1] - params[4] * s[2];
				u *= u;
				ScalarType v = params[3] * s[2] - params[5] * s[0];
				u += v * v;
				v = params[4] * s[0] - params[3] * s[1];
				u += v * v;
				temp[idx] = std::sqrt(u);
				chi += (values[idx] = WeightT::Weigh(temp[idx] - params[6]))
					* values[idx];
			}
			return chi;
		}

		// Jacobian of the residuals w.r.t. the 7 parameters, one row of
		// `matrix' per point.  temp must hold the distances computed by Chi.
		template< class IteratorT >
		void Derivatives(const ScalarType *params, IteratorT begin, IteratorT end,
			const ScalarType *values, const ScalarType *temp, ScalarType *matrix) const
		{
			int size = end - begin;
#pragma omp parallel for schedule(static)
			for(int idx = 0; idx < size; ++idx)
			{
				Vec3f s;
				for(unsigned int j = 0; j < 3; ++j)
					s[j] = begin[idx][j] - params[j];
				ScalarType g = s[0] * begin[idx][0] + s[1] * begin[idx][1]
					+ s[2] * begin[idx][2];
				if(temp[idx] < 1e-6)
				{
					// point (nearly) on the axis: fall back to a bounded
					// surrogate to avoid division by ~0
					matrix[idx * NumParams + 0] = std::sqrt(1 - params[3] * params[3]);
					matrix[idx * NumParams + 1] = std::sqrt(1 - params[4] * params[4]);
					matrix[idx * NumParams + 2] = std::sqrt(1 - params[5] * params[5]);
				}
				else
				{
					matrix[idx * NumParams + 0] = (params[3] * g - s[0]) / temp[idx];
					matrix[idx * NumParams + 1] = (params[4] * g - s[1]) / temp[idx];
					matrix[idx * NumParams + 2] = (params[5] * g - s[2]) / temp[idx];
				}
				matrix[idx * NumParams + 3] = g * matrix[idx * NumParams + 0];
				matrix[idx * NumParams + 4] = g * matrix[idx * NumParams + 1];
				matrix[idx * NumParams + 5] = g * matrix[idx * NumParams + 2];
				matrix[idx * NumParams + 6] = -1;
				WeightT::template DerivWeigh< NumParams >(temp[idx] - params[6],
					matrix + idx * NumParams);
			}
		}

		// Re-normalize the axis direction and re-anchor the axis position
		// at the point on the axis closest to the origin.
		void Normalize(ScalarType *params) const
		{
			ScalarType l = std::sqrt(params[3] * params[3] + params[4] * params[4]
				+ params[5] * params[5]);
			for(unsigned int i = 3; i < 6; ++i)
				params[i] /= l;
			// find point on axis closest to origin
			float lambda = -(params[0] * params[3] + params[1] * params[4]
				+ params[2] * params[5]);
			for(unsigned int i = 0; i < 3; ++i)
				params[i] = params[i] + lambda * params[i + 3];
		}
	};

private:
	Vec3f m_axisDir;   // unit axis direction
	Vec3f m_axisPos;   // a point on the axis
	float m_radius;
	// local frame perpendicular to the axis (used for ray intersection
	// and angular parameterization)
	GfxTL::HyperplaneCoordinateSystem< float, 3 > m_hcs;
	float m_angularRotatedRadians;
};

// Unsigned distance of p from the cylinder surface.
inline float Cylinder::Distance(const Vec3f &p) const
{
	Vec3f diff = p - m_axisPos;
	float lambda = m_axisDir.dot(diff);
	float axisDist = (diff - lambda * m_axisDir).length();
	return fabs(axisDist - m_radius);
}

// Outward surface normal at (the projection of) p.
inline void Cylinder::Normal(const Vec3f &p, Vec3f *normal) const
{
	Vec3f diff = p - m_axisPos;
	float lambda = m_axisDir.dot(diff);
	*normal = diff - lambda * m_axisDir;
	normal->normalize();
}

// Distance and normal in a single pass (shares the axis projection).
inline float Cylinder::DistanceAndNormal(const Vec3f &p, Vec3f *normal) const
{
	Vec3f diff = p - m_axisPos;
	float lambda = m_axisDir.dot(diff);
	*normal = diff - lambda * m_axisDir;
	float axisDist = normal->length();
	if(axisDist > 0)
		*normal /= axisDist;
	return fabs(axisDist - m_radius);
}

// Signed distance: negative inside the cylinder, positive outside.
inline float Cylinder::SignedDistance(const Vec3f &p) const
{
	Vec3f diff = p - m_axisPos;
	float lambda = m_axisDir.dot(diff);
	float axisDist = (diff - lambda * m_axisDir).length();
	return axisDist - m_radius;
}

// Refine this cylinder against the points in [begin, end) with
// Levenberg-Marquardt; returns false if the fit did not converge.
template< class IteratorT >
bool Cylinder::LeastSquaresFit(IteratorT begin, IteratorT end)
{
	float param[7];
	for(size_t i = 0; i < 3; ++i)
		param[i] = m_axisPos[i];
	for(size_t i = 0; i < 3; ++i)
		param[i + 3] = m_axisDir[i];
	param[6] = m_radius;
	LevMarCylinder< LevMarLSWeight > levMarCylinder;
	if(!LevMar(begin, end, levMarCylinder, param))
		return false;
	for(size_t i = 0; i < 3; ++i)
		m_axisPos[i] = param[i];
	for(size_t i = 0; i < 3; ++i)
		m_axisDir[i] = param[i + 3];
	m_radius = param[6];
	m_hcs.FromNormal(m_axisDir);
	m_angularRotatedRadians = 0;
	return true;
}

// Intersect the ray p + t*r with the infinite cylinder; writes up to two
// ray parameters and returns the number of intersections (0, 1 or 2).
inline unsigned int Cylinder::Intersect(const Vec3f &p, const Vec3f &r,
	float *first, float *second) const
{
	using namespace std;
	// Create a coordinate system for the cylinder. In this system, the
	// cylinder segment center C is the origin and the cylinder axis direction
	// W is the z-axis. U and V are the other coordinate axis directions.
	// If P = x*U+y*V+z*W, the cylinder is x^2 + y^2 = r^2, where r is the
	// cylinder radius. The end caps are |z| = h/2, where h is the cylinder
	// height.
	float fRSqr = m_radius * m_radius;

	// convert incoming line origin to cylinder coordinates
	Vec3f kDiff = p - m_axisPos;
	Vec3f kP(kDiff.dot(m_hcs[0]), kDiff.dot(m_hcs[1]), m_axisDir.dot(kDiff));

	// Get the z-value, in cylinder coordinates, of the incoming line's
	// unit-length direction.
	float fDz = m_axisDir.dot(r);

	if(fabs(fDz) >= 1.f - 1e-7f)
		// The line is parallel to the cylinder axis.
		return 0;

	// convert incoming line unit-length direction to cylinder coordinates
	Vec3f kD(r.dot(m_hcs[0]), r.dot(m_hcs[1]), r.dot(m_axisDir));

	float fA0, fA1, fA2, fDiscr, fRoot, fInv;

	// Test intersection of line P+t*D with infinite cylinder
	// x^2+y^2 = r^2.  This reduces to computing the roots of a
	// quadratic equation.  If P = (px,py,pz) and D = (dx,dy,dz),
	// then the quadratic equation is
	//   (dx^2+dy^2)*t^2 + 2*(px*dx+py*dy)*t + (px^2+py^2-r^2) = 0
	fA0 = kP[0]*kP[0] + kP[1]*kP[1] - fRSqr;
	fA1 = kP[0]*kD[0] + kP[1]*kD[1];
	fA2 = kD[0]*kD[0] + kD[1]*kD[1];
	fDiscr = fA1*fA1 - fA0*fA2;
	if (fDiscr < 0)
		// line does not intersect cylinder
		return 0;
	else if (fDiscr > 1e-7f)
	{
		// line intersects cylinder in two places
		fRoot = sqrt(fDiscr);
		fInv = (1.f)/fA2;
		*first = (-fA1 - fRoot)*fInv;
		*second = (-fA1 + fRoot)*fInv;
		return 2;
	}
	// line is tangent to the cylinder
	*first = -fA1/fA2;
	return 1;
}

} //...ns schnabel

#endif
bml_adjungate_triangle_dense_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_adjungate_triangle.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_adjungate_triangle_dense.h"
#include "bml_allocate_dense.h"
#include "bml_types_dense.h"

#include <complex.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Adjungates a triangle of a matrix in place.
 *
 * The strict triangle named by \p triangle is conjugated and mirrored
 * into the opposite strict triangle; the diagonal is untouched.
 *
 * \ingroup adjungate_triangle_group
 *
 * \param A[in,out] The matrix for which the triangle should be adjungated
 * \param triangle[in] Which triangle to adjungate ('u': upper, 'l': lower)
 */
void TYPED_FUNC(
    bml_adjungate_triangle_dense) (
    bml_matrix_dense_t * A,
    char *triangle)
{
    int n = A->N;
    REAL_T *mat = A->matrix;

    switch (*triangle)
    {
        case 'u':
            /* mirror the conjugated strict upper triangle into the lower */
#pragma omp parallel for shared(n, mat)
            for (int row = 0; row < n - 1; row++)
            {
                for (int col = row + 1; col < n; col++)
                {
                    mat[ROWMAJOR(col, row, n, n)] =
                        conj(mat[ROWMAJOR(row, col, n, n)]);
                }
            }
            break;

        case 'l':
            /* mirror the conjugated strict lower triangle into the upper */
#pragma omp parallel for shared(n, mat)
            for (int row = 0; row < n - 1; row++)
            {
                for (int col = row + 1; col < n; col++)
                {
                    mat[ROWMAJOR(row, col, n, n)] =
                        conj(mat[ROWMAJOR(col, row, n, n)]);
                }
            }
            break;

        default:
            LOG_ERROR("Unknown triangle type in bml_adjungate\n");
    }
}
blackscholes.c
// Copyright (c) 2007 Intel Corp.

// Black-Scholes
// Analytical method for calculating European Options
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice Hall, John C. Hull,

// SIMD Version by Juan M. Cebrian, NTNU - 2013. (modifications under JMCG tag)

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#ifndef WIN32
// JMCG Need to define both
#define fptype float

/* JMCG BEGIN */
#ifdef PARSEC_USE_SSE
#include "simd_defines.h"
#endif
#ifdef PARSEC_USE_AVX
#include "simd_defines.h"
#endif
#ifdef PARSEC_USE_NEON
#include "simd_defines.h"
#endif
#ifdef PARSEC_USE_AVX512
#include "simd_defines.h"
#endif
/* JMCG END */

#else
#include <xmmintrin.h>
#endif

#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif

// #define DEBUG_SIMD
#ifdef DEBUG_SIMD
#define ERR_CHK
#endif

// We need this to compile the scalar version
#ifndef SIMD_WIDTH
#ifdef __GNUC__
#define _MM_ALIGN __attribute__((aligned (16)))
#define MUSTINLINE __attribute__((always_inline))
#else
#define MUSTINLINE __forceinline
#endif
#endif
// END JMCG

// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif

// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"

using namespace std;
using namespace tbb;
#endif //ENABLE_TBB

// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#endif

#define NUM_RUNS 100

// One option record as read from the input file.
typedef struct OptionData_ {
        fptype s;          // spot price
        fptype strike;     // strike price
        fptype r;          // risk-free interest rate
        fptype divq;       // dividend rate
        fptype v;          // volatility
        fptype t;          // time to maturity or option expiration in years
                           // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
        char OptionType;   // Option type. "P"=PUT, "C"=CALL
        fptype divs;       // dividend vals (not used in this test)
        fptype DGrefval;   // DerivaGem Reference Value
} OptionData;

_MM_ALIGN OptionData* data;   // JMCG
_MM_ALIGN fptype* prices;     // JMCG
int numOptions;

// Structure-of-arrays copies of the option data, cache-line aligned in main().
int    * otype;
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;
int numError = 0;
int nThreads;

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286

/* JMCG */
#ifdef SIMD_WIDTH

// Vectorized cumulative normal distribution (Abramowitz-Stegun polynomial
// approximation), one lane per option.
MUSTINLINE _MM_TYPE CNDF_SIMD ( _MM_TYPE InputX ) {
  _MM_TYPE _x;
  _x = InputX;
  _MM_TYPE _k, _n, _accum, _candidate_answer, _flag,
    _A1 = _MM_SET(0.319381530),
    _A2 = _MM_SET(-0.356563782),
    _A3 = _MM_SET(1.781477937),
    _A4 = _MM_SET(-1.821255978),
    _A5 = _MM_SET(1.330274429),
    _INV_ROOT2PI = _MM_SET(0.39894228);

  //Get signs of _x
  _flag = (_MM_TYPE)_MM_CMPLT(_x, _MM_SET(0));
  //Get absolute value of x
  _x = _MM_ABS(_x);

  // k = 1.0 / (1.0 + 0.2316419 * x);
  _k = _MM_DIV(_MM_SET(1), _MM_ADD(_MM_SET(1), _MM_MUL(_MM_SET(0.2316419), _x)));

  // Horner evaluation of the degree-5 polynomial in k.
  _accum = _MM_ADD(_A4, _MM_MUL(_A5, _k));
  _accum = _MM_ADD(_A3, _MM_MUL(_accum, _k));
  _accum = _MM_ADD(_A2, _MM_MUL(_accum, _k));
  _accum = _MM_ADD(_A1, _MM_MUL(_accum, _k));
  _accum = _MM_MUL(_accum, _k);

  // n = expf(-0.5 * x * x);  n *= INV_ROOT2PI;
  _n = _MM_MUL(_MM_EXP(_MM_MUL(_MM_MUL(_MM_SET(-.5), _x), _x)), _INV_ROOT2PI);

  // candidate_answer = 1.0 - n * accum;
  _candidate_answer = _MM_SUB(_MM_SET(1), _MM_MUL(_n, _accum));

  // return (flag ? 1.0 - candidate_answer : candidate_answer);
  _candidate_answer = _MM_OR(_MM_ANDNOT(_flag, _candidate_answer),
                             _MM_AND(_flag, _MM_SUB(_MM_SET(1), _candidate_answer)));
  return _candidate_answer;
}

//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////

// Vectorized Black-Scholes for SIMD_WIDTH consecutive options starting at the
// given array pointers; writes SIMD_WIDTH prices into OptionPrice.
void BlkSchlsEqEuroNoDiv_SIMD (fptype * OptionPrice, int numOptions,
                               fptype * sptprice, fptype * strike,
                               fptype * rate, fptype * volatility,
                               fptype * time, int * otype, float timet)
{
  _MM_TYPE _d1, _d2, _c, _p, _Nd1, _Nd2, _expval, _answer, _tmp1,
           _T, _sigma, _K, _r, _S0;

  //Loads
  _T     = _MM_LOADU(time);
  _sigma = _MM_LOADU(volatility);
  _K     = _MM_LOADU(strike);
  _r     = _MM_LOADU(rate);
  _S0    = _MM_LOADU(sptprice);

  // d1 = logf(S0/K)
  _d1 = _MM_DIV(_S0, _K);
  _d1 = _MM_LOG(_d1);

  // d1 = logf(S0/K) + (r + 0.5*sigma*sigma)*T;
  _tmp1 = _MM_MUL(_sigma, _sigma);       // sigma*sigma
  _tmp1 = _MM_MUL(_tmp1, _MM_SET(.5));   // 0.5*sigma*sigma
  _tmp1 = _MM_ADD(_tmp1, _r);            // r + 0.5*sigma*sigma
  _tmp1 = _MM_MUL(_tmp1, _T);            // (r + 0.5*sigma*sigma)*T
  _d1 = _MM_ADD(_d1, _tmp1);             // logf(S0/K) + (r + 0.5*sigma*sigma)*T

  _MM_TYPE _sqrt_T = _MM_SQRT(_T);

  // d1 /= (sigma * sqrt(T));
  _d1 = _MM_DIV(_d1, _sigma);            // d1 /= sigma
  _d1 = _MM_DIV(_d1, _sqrt_T);           // d1 /= (sigma * sqrt(T))

  // d2 = d1 - sigma * sqrt(T);
  _d2 = _MM_SUB(_d1, _MM_MUL(_sigma, _sqrt_T));

  _Nd1 = CNDF_SIMD(_d1);
  _Nd2 = CNDF_SIMD(_d2);

  // expval = exp(-r*T)
  _expval = _MM_MUL(_T, _r);
  //Negate value of r by reversing the sign bit
  // _MM_TYPE _absmask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
  // _expval = _mm_xor_ps(_absmask, _expval);
  _expval = _MM_NEG(_expval);
  _expval = _MM_EXP(_expval);

  // c = S0 * Nd1 - K * expval * Nd2;
  _c = _MM_SUB(_MM_MUL(_S0, _Nd1), _MM_MUL(_MM_MUL(_K, _expval), _Nd2));

  // p = K * expval * (1.0 - Nd2) - S0 * (1.0 - Nd1);
  _p = _MM_SUB(_MM_MUL(_K, _MM_MUL(_expval, _MM_SUB(_MM_SET(1), _Nd2))), // K * expval * (1.0 - Nd2)
               _MM_MUL(_S0, _MM_SUB(_MM_SET(1), _Nd1)));                 // S0 * (1.0 - Nd1)

  // _tmp1 = (_MM_TYPE)_MM_CMPEQ(_MM_LOADU((float*)otype), _MM_SETZERO()); // otype ?
  // This looks weird but our ARM evaluation system seems to be running in
  // Runfast mode. In this mode Subnormal numbers are being flushed to zero
  // (that is, the 0x0...1 stored in otype)
  // Casting everything to integer and using integer comparations seems to work
  // minimum positive subnormal number 00000001 1.40129846e-45
  _tmp1 = _MM_CAST_I_TO_FP(_MM_CMPEQ_SIG(_MM_CAST_FP_TO_I(_MM_LOADU((fptype*)otype)), _MM_SETZERO_I()));
  _answer = _MM_OR(_MM_AND(_tmp1, _c), _MM_ANDNOT(_tmp1, _p));

  _MM_STORE(OptionPrice, _answer);
}
#endif // SIMD_WIDTH
/* JMCG END */

// Scalar cumulative normal distribution (Abramowitz-Stegun polynomial
// approximation; see Hull, Section 11.8).
fptype CNDF ( fptype InputX )
{
    int sign;

    fptype OutputX;
    fptype xInput;
    fptype xNPrimeofX;
    fptype expValues;
    fptype xK2;
    fptype xK2_2, xK2_3;
    fptype xK2_4, xK2_5;
    fptype xLocal, xLocal_1;
    fptype xLocal_2, xLocal_3;

    // Check for negative value of InputX
    if (InputX < 0.0) {
        InputX = -InputX;
        sign = 1;
    } else
        sign = 0;

    xInput = InputX;

    // Compute NPrimeX term common to both four & six decimal accuracy calcs
    expValues = exp(-0.5f * InputX * InputX);
    xNPrimeofX = expValues;
    xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;

    xK2 = 0.2316419 * xInput;
    xK2 = 1.0 + xK2;
    xK2 = 1.0 / xK2;
    xK2_2 = xK2 * xK2;
    xK2_3 = xK2_2 * xK2;
    xK2_4 = xK2_3 * xK2;
    xK2_5 = xK2_4 * xK2;

    xLocal_1 = xK2 * 0.319381530;
    xLocal_2 = xK2_2 * (-0.356563782);
    xLocal_3 = xK2_3 * 1.781477937;
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_4 * (-1.821255978);
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_5 * 1.330274429;
    xLocal_2 = xLocal_2 + xLocal_3;

    xLocal_1 = xLocal_2 + xLocal_1;
    xLocal   = xLocal_1 * xNPrimeofX;
    xLocal   = 1.0 - xLocal;

    OutputX  = xLocal;

    // N(-x) = 1 - N(x)
    if (sign) {
        OutputX = 1.0 - OutputX;
    }

    return OutputX;
}

// Scalar Black-Scholes price for a single European option without dividends.
// otype == 0 selects a call, anything else a put. timet is unused here
// (kept for interface compatibility with the SIMD variant).
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
                            fptype strike, fptype rate, fptype volatility,
                            fptype time, int otype, float timet )
{
    fptype OptionPrice;

    // local private working variables for the calculation
    fptype xStockPrice;
    fptype xStrikePrice;
    fptype xRiskFreeRate;
    fptype xVolatility;
    fptype xTime;
    fptype xSqrtTime;

    fptype logValues;
    fptype xLogTerm;
    fptype xD1;
    fptype xD2;
    fptype xPowerTerm;
    fptype xDen;
    fptype d1;
    fptype d2;
    fptype FutureValueX;
    fptype NofXd1;
    fptype NofXd2;
    fptype NegNofXd1;
    fptype NegNofXd2;

    xStockPrice = sptprice;
    xStrikePrice = strike;
    xRiskFreeRate = rate;
    xVolatility = volatility;

    xTime = time;
    xSqrtTime = sqrt(xTime);

    logValues = log( sptprice / strike );

    xLogTerm = logValues;

    xPowerTerm = xVolatility * xVolatility;
    xPowerTerm = xPowerTerm * 0.5;

    xD1 = xRiskFreeRate + xPowerTerm;
    xD1 = xD1 * xTime;
    xD1 = xD1 + xLogTerm;

    xDen = xVolatility * xSqrtTime;
    xD1 = xD1 / xDen;
    xD2 = xD1 - xDen;

    d1 = xD1;
    d2 = xD2;

    NofXd1 = CNDF( d1 );
    NofXd2 = CNDF( d2 );

    FutureValueX = strike * ( exp( -(rate)*(time) ) );
    if (otype == 0) {
        OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
    } else {
        NegNofXd1 = (1.0 - NofXd1);
        NegNofXd2 = (1.0 - NofXd2);
        OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
    }

    return OptionPrice;
}

#ifdef ENABLE_TBB
// TBB work functor: prices the options in the given blocked range.
struct mainWork {
  mainWork(){}
  mainWork(mainWork &w, tbb::split){}

  /* JMCG */
#ifdef SIMD_WIDTH
  void operator()(const tbb::blocked_range<int> &range) const {
    _MM_ALIGN fptype price[SIMD_WIDTH];
    fptype priceDelta;
    int begin = range.begin();
    int end = range.end();

    for (int i=begin; i!=end; i+=SIMD_WIDTH) {
      /* Calling main function to calculate option value based on
       * Black & Scholes's equation.
       */
      BlkSchlsEqEuroNoDiv_SIMD( price, SIMD_WIDTH, &(sptprice[i]), &(strike[i]),
                                &(rate[i]), &(volatility[i]), &(otime[i]),
                                &(otype[i]), 0);
      for (int k=0; k<SIMD_WIDTH; k++) {
        prices[i+k] = price[k];

#ifdef ERR_CHK
        priceDelta = data[i+k].DGrefval - price[k];
        if( fabs(priceDelta) >= 1e-5 ){
          // FIX: pass price[k], not the array 'price' (passing a pointer
          // for %f is undefined behavior).
          fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                  i+k, price[k], data[i+k].DGrefval, priceDelta);
          numError ++;
        }
#endif
      }
    }
  }
#else // !SIMD_WIDTH
  void operator()(const tbb::blocked_range<int> &range) const {
    fptype price;
    int begin = range.begin();
    int end = range.end();

    for (int i=begin; i!=end; i++) {
      /* Calling main function to calculate option value based on
       * Black & Scholes's equation.
       */
      price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                   rate[i], volatility[i], otime[i],
                                   otype[i], 0);
      prices[i] = price;

#ifdef ERR_CHK
      fptype priceDelta = data[i].DGrefval - price;
      if( fabs(priceDelta) >= 1e-5 ){
        fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                i, price, data[i].DGrefval, priceDelta);
        numError ++;
      }
#endif
    }
  }
#endif // SIMD_WIDTH
  /* JMCG END */
};
#endif // ENABLE_TBB

//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////

#ifdef ENABLE_TBB
// TBB driver: repeats the pricing sweep NUM_RUNS times over all options.
int bs_thread(void *tid_ptr) {
    int j;
    tbb::affinity_partitioner a;

    mainWork doall;
    for (j=0; j<NUM_RUNS; j++) {
      tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
    }

    return 0;
}
#else // !ENABLE_TBB

// Worker entry point: prices this thread's slice of the options (or, under
// OpenMP, the whole array split by the parallel-for) NUM_RUNS times.
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
int bs_thread(void *tid_ptr) {
#endif

  /* JMCG */
#ifdef SIMD_WIDTH
    int i, j, k;
    _MM_ALIGN fptype price[SIMD_WIDTH];
    fptype priceDelta;
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_thread_begin();
#endif

    for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
        for (i=0; i<numOptions; i += SIMD_WIDTH) {
#else //ENABLE_OPENMP
        for (i=start; i<end; i += SIMD_WIDTH) {
#endif //ENABLE_OPENMP
            // Calling main function to calculate option value based on
            // Black & Scholes's equation.
            BlkSchlsEqEuroNoDiv_SIMD(price, SIMD_WIDTH, &(sptprice[i]), &(strike[i]),
                                     &(rate[i]), &(volatility[i]), &(otime[i]),
                                     &(otype[i]), 0);
            for (k=0; k<SIMD_WIDTH; k++) {
                prices[i+k] = price[k];
            }
#ifdef ERR_CHK
            for (k=0; k<SIMD_WIDTH; k++) {
                priceDelta = data[i+k].DGrefval - price[k];
                if (fabs(priceDelta) >= 1e-4) {
                    printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                           i + k, price[k], data[i+k].DGrefval, priceDelta);
                    numError ++;
                }
            }
#endif
        }
    }

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_thread_end();
#endif

    return 0;
}
#else // ! SIMD_WIDTH
    int i, j;
    fptype price;
    fptype priceDelta;
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_thread_begin();
#endif

    for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
        for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
        for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation.
             */
            price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                         rate[i], volatility[i], otime[i],
                                         otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            priceDelta = data[i].DGrefval - price;
            if( fabs(priceDelta) >= 1e-4 ){
                printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                       i, price, data[i].DGrefval, priceDelta);
                numError ++;
            }
#endif
        }
    }

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_thread_end();
#endif

    return 0;
}
#endif // SIMD_WIDTH
/* JMCG END */
#endif //ENABLE_TBB

// Reads the option file, prices all options NUM_RUNS times with the selected
// threading backend, and writes the prices to the output file.
int main (int argc, char **argv)
{
    FILE *file;
    int i;
    int loopnum;
    fptype * buffer;
    int * buffer2;
    int rv;

#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
    printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
    fflush(NULL);
#else
    printf("PARSEC Benchmark Suite\n");
    fflush(NULL);
#endif //PARSEC_VERSION

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_begin(__parsec_blackscholes);
#endif

    if (argc != 4)
    {
        printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
        exit(1);
    }
    nThreads = atoi(argv[1]);
    char *inputFile = argv[2];
    char *outputFile = argv[3];

    //Read input data from file
    file = fopen(inputFile, "r");
    if(file == NULL) {
      printf("ERROR: Unable to open file `%s'.\n", inputFile);
      exit(1);
    }
    rv = fscanf(file, "%i", &numOptions);
    if(rv != 1) {
      printf("ERROR: Unable to read from file `%s'.\n", inputFile);
      fclose(file);
      exit(1);
    }

    /* JMCG */
#ifdef SIMD_WIDTH
    if(SIMD_WIDTH > numOptions) {
      printf("ERROR: Not enough work for SIMD operation.\n");
      fclose(file);
      exit(1);
    }
    if(nThreads > numOptions/SIMD_WIDTH) {
      printf("WARNING: Not enough work, reducing number of threads to match number of SIMD options packets.\n");
      nThreads = numOptions/SIMD_WIDTH;
    }
#endif
    /* JMCG END */

#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
    if(nThreads != 1) {
        printf("Error: <nthreads> must be 1 (serial version)\n");
        exit(1);
    }
#endif

    data = (OptionData*)malloc(numOptions*sizeof(OptionData));
    prices = (fptype*)malloc(numOptions*sizeof(fptype));
    for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
    {
        rv = fscanf(file, "%f %f %f %f %f %f %c %f %f",
                    &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r,
                    &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t,
                    &data[loopnum].OptionType, &data[loopnum].divs,
                    &data[loopnum].DGrefval);
        if(rv != 9) {
          printf("ERROR: Unable to read from file `%s'.\n", inputFile);
          fclose(file);
          exit(1);
        }
    }
    rv = fclose(file);
    if(rv != 0) {
      printf("ERROR: Unable to close file `%s'.\n", inputFile);
      exit(1);
    }

#ifdef ENABLE_THREADS
    MAIN_INITENV(,8000000,nThreads);
#endif
    printf("Num of Options: %d\n", numOptions);
    printf("Num of Runs: %d\n", NUM_RUNS);

#define PAD 256
#define LINESIZE 64

    // Over-allocate, then round the working pointers up/down so the
    // structure-of-arrays data starts on a cache-line boundary.
    buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
    sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
    strike = sptprice + numOptions;
    rate = strike + numOptions;
    volatility = rate + numOptions;
    otime = volatility + numOptions;

    // FIX: the element type here is int, not fptype (they happen to have the
    // same size on common platforms, which hid the bug).
    buffer2 = (int *) malloc(numOptions * sizeof(int) + PAD);
    otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));

    for (i=0; i<numOptions; i++) {
        otype[i]      = (data[i].OptionType == 'P') ? 1 : 0;
        sptprice[i]   = data[i].s;
        strike[i]     = data[i].strike;
        rate[i]       = data[i].r;
        volatility[i] = data[i].v;
        otime[i]      = data[i].t;
    }

    printf("Size of data: %d\n", (int)(numOptions * (sizeof(OptionData) + sizeof(int))));

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_begin();
#endif

#ifdef ENABLE_THREADS
#ifdef WIN32
    HANDLE *threads;
    int *nums;
    threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE));
    nums = (int *) malloc (nThreads * sizeof(int));

    for(i=0; i<nThreads; i++) {
        nums[i] = i;
        threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
    }
    WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
    free(threads);
    free(nums);
#else
    int *tids;
    tids = (int *) malloc (nThreads * sizeof(int));

    for(i=0; i<nThreads; i++) {
        tids[i]=i;
        CREATE_WITH_ARG(bs_thread, &tids[i]);
    }
    WAIT_FOR_END(nThreads);
    free(tids);
#endif //WIN32
#else //ENABLE_THREADS
#ifdef ENABLE_OPENMP
    {
        int tid=0;
        omp_set_num_threads(nThreads);
        bs_thread(&tid);
    }
#else //ENABLE_OPENMP
#ifdef ENABLE_TBB
    tbb::task_scheduler_init init(nThreads);

    int tid=0;
    bs_thread(&tid);
#else //ENABLE_TBB
    //serial version
    int tid=0;
    bs_thread(&tid);
#endif //ENABLE_TBB
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_end();
#endif

    //Write prices to output file
    file = fopen(outputFile, "w");
    if(file == NULL) {
      printf("ERROR: Unable to open file `%s'.\n", outputFile);
      exit(1);
    }
    rv = fprintf(file, "%i\n", numOptions);
    if(rv < 0) {
      printf("ERROR: Unable to write to file `%s'.\n", outputFile);
      fclose(file);
      exit(1);
    }
    for(i=0; i<numOptions; i++) {
      rv = fprintf(file, "%.18f\n", prices[i]);
      if(rv < 0) {
        printf("ERROR: Unable to write to file `%s'.\n", outputFile);
        fclose(file);
        exit(1);
      }
    }
    rv = fclose(file);
    if(rv != 0) {
      printf("ERROR: Unable to close file `%s'.\n", outputFile);
      exit(1);
    }

#ifdef ERR_CHK
    printf("Num Errors: %d\n", numError);
#endif
    free(data);
    free(prices);
    // FIX: also release the aligned working buffers (previously leaked).
    free(buffer);
    free(buffer2);

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_end();
#endif

    return 0;
}
cffi.c
#include <stdbool.h> #include <stdio.h> #include <math.h> #include <inttypes.h> // Check little or big endian // Linux (32- or 64-bit) is probably little endian bool endian(void) { // https://stackoverflow.com/questions/12791864/c-program-to-check-little-vs-big-endian volatile uint32_t i=0x01234567; // return 0 for big endian, 1 for little endian. return (*((uint8_t*)(&i))) == 0x67; } void cffi_int32and( const int n, const int* a, const int* b, int* c ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { c[elem_idx] = a[elem_idx] & b[elem_idx]; } } void cffi_int32or( const int n, const int* a, const int* b, int* c ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { c[elem_idx] = a[elem_idx] | b[elem_idx]; } } void cffi_int32xor( const int n, const int* a, const int* b, int* c ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { c[elem_idx] = a[elem_idx] ^ b[elem_idx]; } } void cffi_int32msbprojection( const int n, const int* original, const int* perturbed, int* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { output[elem_idx] = original[elem_idx]; // bit_idx = 0 would be LSB on little-endian for (int bit_idx = 31; bit_idx >= 0; bit_idx--) { int mask = 1 << bit_idx; int original_bit = original[elem_idx] & mask; int perturbed_bit = perturbed[elem_idx] & mask; if (original_bit != perturbed_bit) { output[elem_idx] ^= mask; break; } } } } void cffi_int32hammingdistance( const int n, const int* a, const int* b, int* dist ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { dist[elem_idx] = 0; int x = a[elem_idx] ^ b[elem_idx]; while(x != 0) { x = x & (x-1); dist[elem_idx]++; } } } void cffi_int32flip( const int n, const bool* mask, const int* input, int* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { int xor_mask = 0; for (int bit_idx = 0; bit_idx < 32; bit_idx ++) { // first bit is lSB 
if little endian (Linux 32- or 64-bit) if(mask[32*elem_idx + bit_idx]) { xor_mask |= (1 << bit_idx); } } output[elem_idx] = input[elem_idx]; output[elem_idx] ^= xor_mask; } } void cffi_int32set( const int n, const bool* set1, const bool* set0, const int* input, int* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { int set1_mask = 0; int set0_mask = 0; for (int bit_idx = 0; bit_idx < 32; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if (set1[32*elem_idx + bit_idx]) { set1_mask |= (1 << bit_idx); } if (set0[32*elem_idx + bit_idx]) { set0_mask |= (1 << bit_idx); } } int output_elem = input[elem_idx]; output_elem |= set1_mask; output_elem &= (~set0_mask); // negation will do all bits set to 0 to 0 and all other's to 1, so and will do the setting output[elem_idx] = output_elem; } } void cffi_int32setzero( const int n, const int m, const int* input, int* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { int set0_mask = 0; for (int bit_idx = 0; bit_idx <32; bit_idx ++) { if (bit_idx < m) { // first bit is lSB if little endian (Linux 32- or 64-bit) set0_mask |= (1 << bit_idx); } } int output_elem = input[elem_idx]; output_elem &= (~set0_mask); // negation will do all bits set to 0 to 0 and all other's to 1, so and will do the setting output[elem_idx] = output_elem; } } void cffi_int32randomflip( const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob, const int* protected_bits, const int* len_protected_bits, const float* rand_src, const int* input, int* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { int xor_mask = 0; for (int bit_idx = 0; bit_idx < 32; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if(!protected_bits[bit_idx]) { int input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx; float bit_flip_prob = (input_bit == 1) ? 
*one_bit_flip_prob : *zero_bit_flip_prob; if(rand_src[32*elem_idx + bit_idx] < bit_flip_prob) { xor_mask |= (1 << bit_idx); } } } output[elem_idx] = input[elem_idx]; output[elem_idx] ^= xor_mask; } } void cffi_int32maskedrandomflip( const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob, const int* protected_bits, const int* len_protected_bits, const bool* mask, const float* rand_src, const int* input, int* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { if (!mask[elem_idx]) { output[elem_idx] = input[elem_idx]; } else { int xor_mask = 0; for (int bit_idx = 0; bit_idx < 32; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if(!protected_bits[bit_idx]) { int input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx; float bit_flip_prob = (input_bit == 1) ? *one_bit_flip_prob : *zero_bit_flip_prob; if(rand_src[32*elem_idx + bit_idx] < bit_flip_prob) { xor_mask |= (1 << bit_idx); } } } output[elem_idx] = input[elem_idx]; output[elem_idx] ^= xor_mask; } } } void cffi_int32individualrandomflip( const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob, const int* protected_bits, const int* len_protected_bits, const float* rand_src, const int* input, int* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { int xor_mask = 0; float bit_flip_prob; for (int bit_idx = 0; bit_idx < 32; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if(!protected_bits[bit_idx]) { int input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx; if (input_bit == 1) { bit_flip_prob = one_bit_flip_prob[32*elem_idx + bit_idx]; } else { bit_flip_prob = zero_bit_flip_prob[32*elem_idx + bit_idx]; } if(rand_src[32*elem_idx + bit_idx] < bit_flip_prob) { xor_mask |= (1 << bit_idx); } } } output[elem_idx] = input[elem_idx]; output[elem_idx] ^= xor_mask; } } void cffi_int32maskedindividualrandomflip( const int n, const float* zero_bit_flip_prob, 
const float* one_bit_flip_prob, const int* protected_bits, const int* len_protected_bits, const bool* mask, const float* rand_src, const int* input, int* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { if (!mask[elem_idx]) { output[elem_idx] = input[elem_idx]; } else { int xor_mask = 0; float bit_flip_prob; for (int bit_idx = 0; bit_idx < 32; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if(!protected_bits[bit_idx]) { int input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx; if (input_bit == 1) { bit_flip_prob = one_bit_flip_prob[32*elem_idx + bit_idx]; } else { bit_flip_prob = zero_bit_flip_prob[32*elem_idx + bit_idx]; } if(rand_src[32*elem_idx + bit_idx] < bit_flip_prob) { xor_mask |= (1 << bit_idx); } } } output[elem_idx] = input[elem_idx]; output[elem_idx] ^= xor_mask; } } } void cffi_int32bits( const int n, const int* input, bool* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { for (int bit_idx = 0; bit_idx < 32; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) int mask = 1 << bit_idx; int masked_input = input[elem_idx] & mask; int bit = masked_input >> bit_idx; output[32*elem_idx + bit_idx] = bit; } } } void cffi_int16and( const int n, const short* a, const short* b, short* c ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { c[elem_idx] = a[elem_idx] & b[elem_idx]; } } void cffi_int16or( const int n, const short* a, const short* b, short* c ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { c[elem_idx] = a[elem_idx] | b[elem_idx]; } } void cffi_int16xor( const int n, const short* a, const short* b, short* c ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { c[elem_idx] = a[elem_idx] ^ b[elem_idx]; } } void cffi_int16msbprojection( const int n, const short* original, const short* perturbed, short* output ) { #pragma omp parallel for for (int 
elem_idx = 0; elem_idx < n; elem_idx++) { output[elem_idx] = original[elem_idx]; // bit_idx = 0 would be LSB on little-endian for (int bit_idx = 15; bit_idx >= 0; bit_idx--) { short mask = 1 << bit_idx; short original_bit = original[elem_idx] & mask; short perturbed_bit = perturbed[elem_idx] & mask; if (original_bit != perturbed_bit) { output[elem_idx] ^= mask; break; } } } } void cffi_int16hammingdistance( const int n, const short* a, const short* b, int* dist ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { dist[elem_idx] = 0; short x = a[elem_idx] ^ b[elem_idx]; while(x != 0) { x = x & (x-1); dist[elem_idx]++; } } } void cffi_int16flip( const int n, const bool* mask, const short* input, short* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { short xor_mask = 0; for (int bit_idx = 0; bit_idx < 16; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if(mask[16*elem_idx + bit_idx]) { xor_mask |= (1 << bit_idx); } } output[elem_idx] = input[elem_idx]; output[elem_idx] ^= xor_mask; } } void cffi_int16set( const int n, const bool* set1, const bool* set0, const short* input, short* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { short set1_mask = 0; short set0_mask = 0; for (int bit_idx = 0; bit_idx < 16; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if (set1[16*elem_idx + bit_idx]) { set1_mask |= (1 << bit_idx); } if (set0[16*elem_idx + bit_idx]) { set0_mask |= (1 << bit_idx); } } short output_elem = input[elem_idx]; output_elem |= set1_mask; output_elem &= (~set0_mask); // negation will do all bits set to 0 to 0 and all other's to 1, so and will do the setting output[elem_idx] = output_elem; } } void cffi_int16setzero( const int n, const int m, const short* input, short* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { short set0_mask = 0; for (int bit_idx = 0; bit_idx < 16; 
bit_idx ++) { if (bit_idx < m) { // first bit is lSB if little endian (Linux 32- or 64-bit) set0_mask |= (1 << bit_idx); } } short output_elem = input[elem_idx]; output_elem &= (~set0_mask); // negation will do all bits set to 0 to 0 and all other's to 1, so and will do the setting output[elem_idx] = output_elem; } } void cffi_int16randomflip( const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob, const int* protected_bits, const int* len_protected_bits, const float* rand_src, const short* input, short* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { short xor_mask = 0; for (int bit_idx = 0; bit_idx < 16; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if (!protected_bits[bit_idx]) { short input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx; float bit_flip_prob = (input_bit == 1) ? *one_bit_flip_prob : *zero_bit_flip_prob; if (rand_src[16*elem_idx + bit_idx] < bit_flip_prob) { xor_mask |= (1 << bit_idx); } } } output[elem_idx] = input[elem_idx]; output[elem_idx] ^= xor_mask; } } void cffi_int16maskedrandomflip( const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob, const int* protected_bits, const int* len_protected_bits, const bool* mask, const float* rand_src, const short* input, short* output ) { #pragma omp parallel for for (int elem_idx = 0; elem_idx < n; elem_idx++) { if (!mask[elem_idx]) { output[elem_idx] = input[elem_idx]; } else { short xor_mask = 0; for (int bit_idx = 0; bit_idx < 16; bit_idx ++) { // first bit is lSB if little endian (Linux 32- or 64-bit) if (!protected_bits[bit_idx]) { short input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx; float bit_flip_prob = (input_bit == 1) ? 
/* NOTE(review): this chunk begins mid-function — everything up to the third
 * closing brace below is the tail of a masked random-flip routine whose head
 * precedes this chunk; it is kept verbatim and untouched. */
*one_bit_flip_prob : *zero_bit_flip_prob;
                    if (rand_src[16*elem_idx + bit_idx] < bit_flip_prob) {
                        xor_mask |= (1 << bit_idx);
                    }
                }
            }
            output[elem_idx] = input[elem_idx];
            output[elem_idx] ^= xor_mask;
        }
    }
}

/* Randomly flip bits of each int16 element using a PER-BIT, PER-ELEMENT flip
 * probability: one_bit_flip_prob / zero_bit_flip_prob and rand_src each hold
 * 16*n floats, indexed [16*elem_idx + bit_idx]. Bits whose entry in
 * protected_bits is nonzero are never flipped. len_protected_bits is accepted
 * but not read here. Writes output[i] = input[i] XOR accumulated flip mask. */
void cffi_int16individualrandomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits,
    const float* rand_src, const short* input, short* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        short xor_mask = 0;
        float bit_flip_prob;
        for (int bit_idx = 0; bit_idx < 16; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if (!protected_bits[bit_idx]) {
                short input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                if (input_bit == 1) {
                    bit_flip_prob = one_bit_flip_prob[16*elem_idx + bit_idx];
                } else {
                    bit_flip_prob = zero_bit_flip_prob[16*elem_idx + bit_idx];
                }
                if (rand_src[16*elem_idx + bit_idx] < bit_flip_prob) {
                    xor_mask |= (1 << bit_idx);
                }
            }
        }
        output[elem_idx] = input[elem_idx];
        output[elem_idx] ^= xor_mask;
    }
}

/* As cffi_int16individualrandomflip, but elements whose mask[elem_idx] is
 * false are copied through unchanged (no bits considered for flipping). */
void cffi_int16maskedindividualrandomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits, const bool* mask,
    const float* rand_src, const short* input, short* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        if (!mask[elem_idx]) {
            output[elem_idx] = input[elem_idx];
        } else {
            short xor_mask = 0;
            float bit_flip_prob;
            for (int bit_idx = 0; bit_idx < 16; bit_idx ++) {
                // first bit is lSB if little endian (Linux 32- or 64-bit)
                if (!protected_bits[bit_idx]) {
                    short input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                    if (input_bit == 1) {
                        bit_flip_prob = one_bit_flip_prob[16*elem_idx + bit_idx];
                    } else {
                        bit_flip_prob = zero_bit_flip_prob[16*elem_idx + bit_idx];
                    }
                    if (rand_src[16*elem_idx + bit_idx] < bit_flip_prob) {
                        xor_mask |= (1 << bit_idx);
                    }
                }
            }
            output[elem_idx] = input[elem_idx];
            output[elem_idx] ^= xor_mask;
        }
    }
}

/* Unpack each int16 element into 16 bools, LSB first:
 * output[16*elem_idx + bit_idx] = bit bit_idx of input[elem_idx]. */
void cffi_int16bits(
    const int n, const short* input, bool* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        for (int bit_idx = 0; bit_idx < 16; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            short mask = 1 << bit_idx;
            short masked_input = input[elem_idx] & mask;
            short bit = masked_input >> bit_idx;
            output[16*elem_idx + bit_idx] = bit;
        }
    }
}

/* Elementwise bitwise AND of two int8 arrays: c = a & b. */
void cffi_int8and(
    const int n, const char* a, const char* b, char* c
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        c[elem_idx] = a[elem_idx] & b[elem_idx];
    }
}

/* Elementwise bitwise OR of two int8 arrays: c = a | b. */
void cffi_int8or(
    const int n, const char* a, const char* b, char* c
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        c[elem_idx] = a[elem_idx] | b[elem_idx];
    }
}

/* Elementwise bitwise XOR of two int8 arrays: c = a ^ b. */
void cffi_int8xor(
    const int n, const char* a, const char* b, char* c
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        c[elem_idx] = a[elem_idx] ^ b[elem_idx];
    }
}

/* Project each perturbed int8 element back toward the original by flipping, in
 * output (initialized from original), ONLY the single most significant bit at
 * which original and perturbed differ (scan from bit 7 down, stop at first
 * difference). */
void cffi_int8msbprojection(
    const int n, const char* original, const char* perturbed, char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        output[elem_idx] = original[elem_idx];
        // bit_idx = 0 would be LSB on little-endian
        for (int bit_idx = 7; bit_idx >= 0; bit_idx--) {
            char mask = 1 << bit_idx;
            char original_bit = original[elem_idx] & mask;
            char perturbed_bit = perturbed[elem_idx] & mask;
            if (original_bit != perturbed_bit) {
                output[elem_idx] ^= mask;
                break;
            }
        }
    }
}

/* Per-element Hamming distance between two int8 arrays, via Kernighan's
 * popcount on a^b (clears lowest set bit each iteration). */
void cffi_int8hammingdistance(
    const int n, const char* a, const char* b, int* dist
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        dist[elem_idx] = 0;
        char x = a[elem_idx] ^ b[elem_idx];
        while(x != 0) {
            x = x & (x-1);
            dist[elem_idx]++;
        }
    }
}

/* Flip the bits of each int8 element selected by a per-bit bool mask of
 * length 8*n (mask[8*elem_idx + bit_idx], LSB first). */
void cffi_int8flip(
    const int n, const bool* mask, const char* input, char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        char xor_mask = 0;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if(mask[8*elem_idx + bit_idx]) {
                xor_mask |= (1 << bit_idx);
            }
        }
        output[elem_idx] = input[elem_idx];
        output[elem_idx] ^= xor_mask;
    }
}

/* Force bits of each int8 element to 1 (set1) and to 0 (set0), each given as
 * an 8*n bool mask; set0 wins where both are set (applied last). */
void cffi_int8set(
    const int n, const bool* set1, const bool* set0, const char* input, char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        char set1_mask = 0;
        char set0_mask = 0;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if (set1[8*elem_idx + bit_idx]) {
                set1_mask |= (1 << bit_idx);
            }
            if (set0[8*elem_idx + bit_idx]) {
                set0_mask |= (1 << bit_idx);
            }
        }
        char output_elem = input[elem_idx];
        output_elem |= set1_mask;
        output_elem &= (~set0_mask); // negation will do all bits set to 0 to 0 and all other's to 1, so and will do the setting
        output[elem_idx] = output_elem;
    }
}

/* Zero the m least significant bits of each int8 element. */
void cffi_int8setzero(
    const int n, const int m, const char* input, char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        char set0_mask = 0;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            if (bit_idx < m) {
                // first bit is lSB if little endian (Linux 32- or 64-bit)
                set0_mask |= (1 << bit_idx);
            }
        }
        char output_elem = input[elem_idx];
        output_elem &= (~set0_mask); // negation will do all bits set to 0 to 0 and all other's to 1, so and will do the setting
        output[elem_idx] = output_elem;
    }
}

/* Randomly flip bits of each int8 element with SCALAR probabilities
 * (*one_bit_flip_prob for 1-bits, *zero_bit_flip_prob for 0-bits), driven by
 * 8*n uniform samples in rand_src; protected bits are never flipped. */
void cffi_int8randomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits,
    const float* rand_src, const char* input, char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        char xor_mask = 0;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if (!protected_bits[bit_idx]) {
                char input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                float bit_flip_prob =
                    (input_bit == 1) ? *one_bit_flip_prob : *zero_bit_flip_prob;
                if (rand_src[8*elem_idx + bit_idx] < bit_flip_prob) {
                    xor_mask |= (1 << bit_idx);
                }
            }
        }
        output[elem_idx] = input[elem_idx];
        output[elem_idx] ^= xor_mask;
    }
}

/* As cffi_int8randomflip, but elements whose mask[elem_idx] is false are
 * copied through unchanged. */
void cffi_int8maskedrandomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits, const bool* mask,
    const float* rand_src, const char* input, char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        if (!mask[elem_idx]) {
            output[elem_idx] = input[elem_idx];
        } else {
            char xor_mask = 0;
            for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
                // first bit is lSB if little endian (Linux 32- or 64-bit)
                if (!protected_bits[bit_idx]) {
                    char input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                    float bit_flip_prob =
                        (input_bit == 1) ? *one_bit_flip_prob : *zero_bit_flip_prob;
                    if (rand_src[8*elem_idx + bit_idx] < bit_flip_prob) {
                        xor_mask |= (1 << bit_idx);
                    }
                }
            }
            output[elem_idx] = input[elem_idx];
            output[elem_idx] ^= xor_mask;
        }
    }
}

/* Randomly flip bits of each int8 element using PER-BIT, PER-ELEMENT flip
 * probabilities (8*n floats, indexed [8*elem_idx + bit_idx]). */
void cffi_int8individualrandomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits,
    const float* rand_src, const char* input, char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        char xor_mask = 0;
        float bit_flip_prob;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if (!protected_bits[bit_idx]) {
                char input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                if (input_bit == 1) {
                    bit_flip_prob = one_bit_flip_prob[8*elem_idx + bit_idx];
                } else {
                    bit_flip_prob = zero_bit_flip_prob[8*elem_idx + bit_idx];
                }
                if (rand_src[8*elem_idx + bit_idx] < bit_flip_prob) {
                    xor_mask |= (1 << bit_idx);
                }
            }
        }
        output[elem_idx] = input[elem_idx];
        output[elem_idx] ^= xor_mask;
    }
}

/* As cffi_int8individualrandomflip, but elements whose mask[elem_idx] is
 * false are copied through unchanged. */
void cffi_int8maskedindividualrandomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits, const bool* mask,
    const float* rand_src, const char* input, char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        if (!mask[elem_idx]) {
            output[elem_idx] = input[elem_idx];
        } else {
            char xor_mask = 0;
            float bit_flip_prob;
            for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
                // first bit is lSB if little endian (Linux 32- or 64-bit)
                if (!protected_bits[bit_idx]) {
                    char input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                    if (input_bit == 1) {
                        bit_flip_prob = one_bit_flip_prob[8*elem_idx + bit_idx];
                    } else {
                        bit_flip_prob = zero_bit_flip_prob[8*elem_idx + bit_idx];
                    }
                    if (rand_src[8*elem_idx + bit_idx] < bit_flip_prob) {
                        xor_mask |= (1 << bit_idx);
                    }
                }
            }
            output[elem_idx] = input[elem_idx];
            output[elem_idx] ^= xor_mask;
        }
    }
}

/* Unpack each int8 element into 8 bools, LSB first:
 * output[8*elem_idx + bit_idx] = bit bit_idx of input[elem_idx]. */
void cffi_int8bits(
    const int n, const char* input, bool* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            char mask = 1 << bit_idx;
            char masked_input = input[elem_idx] & mask;
            char bit = masked_input >> bit_idx;
            output[8*elem_idx + bit_idx] = bit;
        }
    }
}

/* Elementwise bitwise AND of two uint8 arrays: c = a & b. */
void cffi_uint8and(
    const int n, const unsigned char* a, const unsigned char* b, unsigned char* c
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        c[elem_idx] = a[elem_idx] & b[elem_idx];
    }
}

/* Elementwise bitwise OR of two uint8 arrays: c = a | b. */
void cffi_uint8or(
    const int n, const unsigned char* a, const unsigned char* b, unsigned char* c
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        c[elem_idx] = a[elem_idx] | b[elem_idx];
    }
}

/* Elementwise bitwise XOR of two uint8 arrays: c = a ^ b. */
void cffi_uint8xor(
    const int n, const unsigned char* a, const unsigned char* b, unsigned char* c
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        c[elem_idx] = a[elem_idx] ^ b[elem_idx];
    }
}

/* uint8 variant of cffi_int8msbprojection: flip only the most significant
 * differing bit of output (a copy of original) toward perturbed. */
void cffi_uint8msbprojection(
    const int n, const unsigned char* original, const unsigned char* perturbed,
    unsigned char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        output[elem_idx] = original[elem_idx];
        // bit_idx = 0 would be LSB on little-endian
        for (int bit_idx = 7; bit_idx >= 0; bit_idx--) {
            unsigned char mask = 1 << bit_idx;
            unsigned char original_bit = original[elem_idx] & mask;
            unsigned char perturbed_bit = perturbed[elem_idx] & mask;
            if (original_bit != perturbed_bit) {
                output[elem_idx] ^= mask;
                break;
            }
        }
    }
}

/* Per-element Hamming distance between two uint8 arrays (Kernighan popcount). */
void cffi_uint8hammingdistance(
    const int n, const unsigned char* a, const unsigned char* b, int* dist
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        dist[elem_idx] = 0;
        unsigned char x = a[elem_idx] ^ b[elem_idx];
        while(x != 0) {
            x = x & (x-1);
            dist[elem_idx]++;
        }
    }
}

/* Flip the bits of each uint8 element selected by an 8*n per-bit bool mask. */
void cffi_uint8flip(
    const int n, const bool* mask, const unsigned char* input, unsigned char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        unsigned char xor_mask = 0;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if(mask[8*elem_idx + bit_idx]) {
                xor_mask |= (1 << bit_idx);
            }
        }
        output[elem_idx] = input[elem_idx];
        output[elem_idx] ^= xor_mask;
    }
}

/* Force bits of each uint8 element to 1 (set1) and to 0 (set0); set0 is
 * applied after set1 and therefore wins where both are set. */
void cffi_uint8set(
    const int n, const bool* set1, const bool* set0, const unsigned char* input,
    unsigned char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        unsigned char set1_mask = 0;
        unsigned char set0_mask = 0;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if (set1[8*elem_idx + bit_idx]) {
                set1_mask |= (1 << bit_idx);
            }
            if (set0[8*elem_idx + bit_idx]) {
                set0_mask |= (1 << bit_idx);
            }
        }
        unsigned char output_elem = input[elem_idx];
        output_elem |= set1_mask;
        output_elem &= (~set0_mask); // negation will do all bits set to 0 to 0 and all other's to 1, so and will do the setting
        output[elem_idx] = output_elem;
    }
}

/* Zero the m least significant bits of each uint8 element. */
void cffi_uint8setzero(
    const int n, const int m, const unsigned char* input, unsigned char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        unsigned char set0_mask = 0;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            if (bit_idx < m) {
                // first bit is lSB if little endian (Linux 32- or 64-bit)
                set0_mask |= (1 << bit_idx);
            }
        }
        unsigned char output_elem = input[elem_idx];
        output_elem &= (~set0_mask); // negation will do all bits set to 0 to 0 and all other's to 1, so and will do the setting
        output[elem_idx] = output_elem;
    }
}

/* Randomly flip bits of each uint8 element with SCALAR 0-bit/1-bit flip
 * probabilities; protected bits are never flipped. */
void cffi_uint8randomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits,
    const float* rand_src, const unsigned char* input, unsigned char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        unsigned char xor_mask = 0;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if (!protected_bits[bit_idx]) {
                unsigned char input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                float bit_flip_prob =
                    (input_bit == 1) ? *one_bit_flip_prob : *zero_bit_flip_prob;
                if (rand_src[8*elem_idx + bit_idx] < bit_flip_prob) {
                    xor_mask |= (1 << bit_idx);
                }
            }
        }
        output[elem_idx] = input[elem_idx];
        output[elem_idx] ^= xor_mask;
    }
}

/* As cffi_uint8randomflip, but elements whose mask[elem_idx] is false are
 * copied through unchanged. */
void cffi_uint8maskedrandomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits, const bool* mask,
    const float* rand_src, const unsigned char* input, unsigned char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        if (!mask[elem_idx]) {
            output[elem_idx] = input[elem_idx];
        } else {
            unsigned char xor_mask = 0;
            for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
                // first bit is lSB if little endian (Linux 32- or 64-bit)
                if (!protected_bits[bit_idx]) {
                    unsigned char input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                    float bit_flip_prob =
                        (input_bit == 1) ? *one_bit_flip_prob : *zero_bit_flip_prob;
                    if (rand_src[8*elem_idx + bit_idx] < bit_flip_prob) {
                        xor_mask |= (1 << bit_idx);
                    }
                }
            }
            output[elem_idx] = input[elem_idx];
            output[elem_idx] ^= xor_mask;
        }
    }
}

/* Randomly flip bits of each uint8 element using PER-BIT, PER-ELEMENT flip
 * probabilities (8*n floats, indexed [8*elem_idx + bit_idx]). */
void cffi_uint8individualrandomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits,
    const float* rand_src, const unsigned char* input, unsigned char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        unsigned char xor_mask = 0;
        float bit_flip_prob;
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            if (!protected_bits[bit_idx]) {
                unsigned char input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                if (input_bit == 1) {
                    bit_flip_prob = one_bit_flip_prob[8*elem_idx + bit_idx];
                } else {
                    bit_flip_prob = zero_bit_flip_prob[8*elem_idx + bit_idx];
                }
                if (rand_src[8*elem_idx + bit_idx] < bit_flip_prob) {
                    xor_mask |= (1 << bit_idx);
                }
            }
        }
        output[elem_idx] = input[elem_idx];
        output[elem_idx] ^= xor_mask;
    }
}

/* As cffi_uint8individualrandomflip, but elements whose mask[elem_idx] is
 * false are copied through unchanged. */
void cffi_uint8maskedindividualrandomflip(
    const int n, const float* zero_bit_flip_prob, const float* one_bit_flip_prob,
    const int* protected_bits, const int* len_protected_bits, const bool* mask,
    const float* rand_src, const unsigned char* input, unsigned char* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        if (!mask[elem_idx]) {
            output[elem_idx] = input[elem_idx];
        } else {
            unsigned char xor_mask = 0;
            float bit_flip_prob;
            for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
                // first bit is lSB if little endian (Linux 32- or 64-bit)
                if (!protected_bits[bit_idx]) {
                    unsigned char input_bit = (input[elem_idx] & (1 << bit_idx)) >> bit_idx;
                    if (input_bit == 1) {
                        bit_flip_prob = one_bit_flip_prob[8*elem_idx + bit_idx];
                    } else {
                        bit_flip_prob = zero_bit_flip_prob[8*elem_idx + bit_idx];
                    }
                    if (rand_src[8*elem_idx + bit_idx] < bit_flip_prob) {
                        xor_mask |= (1 << bit_idx);
                    }
                }
            }
            output[elem_idx] = input[elem_idx];
            output[elem_idx] ^= xor_mask;
        }
    }
}

/* Unpack each uint8 element into 8 bools, LSB first:
 * output[8*elem_idx + bit_idx] = bit bit_idx of input[elem_idx]. */
void cffi_uint8bits(
    const int n, const unsigned char* input, bool* output
) {
    #pragma omp parallel for
    for (int elem_idx = 0; elem_idx < n; elem_idx++) {
        for (int bit_idx = 0; bit_idx < 8; bit_idx ++) {
            // first bit is lSB if little endian (Linux 32- or 64-bit)
            unsigned char mask = 1 << bit_idx;
            unsigned char masked_input = input[elem_idx] & mask;
            unsigned char bit = masked_input >> bit_idx;
            output[8*elem_idx + bit_idx] = bit;
        }
    }
}
GB_unop__identity_fc32_bool.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): generated kernel — only comments/formatting touched here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_fc32_bool)
// op(A') function: GB (_unop_tran__identity_fc32_bool)

// C type: GxB_FC32_t
// A type: bool
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    bool aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_bool)
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body comes from the shared transpose template
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__band_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): generated kernel — only comments/formatting touched here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint32)
// A*D function (colscale): GB (_AxD__band_uint32)
// D*A function (rowscale): GB (_DxB__band_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__band_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__band_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint32)
// C=scalar+B GB (_bind1st__band_uint32)
// C=scalar+B' GB (_bind1st_tran__band_uint32)
// C=A+scalar GB (_bind2nd__band_uint32)
// C=A'+scalar GB (_bind2nd_tran__band_uint32)

// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij) & (bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) & (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BAND || GxB_NO_UINT32 || GxB_NO_BAND_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BAND is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__band_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__band_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__band_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__band_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__band_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__band_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x) & (aij) ; \
}

GrB_Info GB (_bind1st_tran__band_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) & (y) ; \
}

GrB_Info GB (_bind2nd_tran__band_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
par_interp.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_parcsr_ls.h"

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterp
 *
 * Builds the classical (direct/standard) interpolation operator P for
 * BoomerAMG from the system matrix A and the strength matrix S.
 *
 * Parameters:
 *   A               - fine-grid system matrix (ParCSR)
 *   CF_marker       - per-row C/F splitting: >= 0 marks a C-point, negative
 *                     marks an F-point; the value -3 marks F-points that are
 *                     excluded from interpolation (reset to -1 on exit)
 *   S               - strength-of-connection matrix (same row partition as A)
 *   num_cpts_global - coarse-point partition; entry [1] on the last rank is
 *                     broadcast as the global number of coarse points
 *   num_functions   - number of functions per node; when > 1, dof_func is
 *                     used to restrict weak-connection lumping to the same
 *                     function
 *   dof_func        - function index per fine dof (used iff num_functions>1)
 *   debug_flag      - ==4 enables timing printouts; a negative value enables
 *                     the zero-diagonal warning (print_level=1) and is negated
 *   trunc_factor    - relative truncation threshold for P (0.0 = none)
 *   max_elmts       - max nonzeros kept per row of P (0 = unlimited)
 *   P_ptr           - output: the assembled ParCSR interpolation matrix
 *
 * Returns hypre_error_flag.
 *
 * Structure: (1) exchange CF_marker / dof_func for ghost columns,
 * (2) fetch ghost rows of A (A_ext) and compress their column indices
 * (local columns become local indices, off-diagonal columns are encoded as
 * -kc-1), (3) first pass counts nonzeros of P and builds fine_to_coarse,
 * (4) second pass computes the interpolation weights, (5) assemble P and
 * optionally truncate.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix   *A,
                            HYPRE_Int            *CF_marker,
                            hypre_ParCSRMatrix   *S,
                            HYPRE_BigInt         *num_cpts_global,
                            HYPRE_Int             num_functions,
                            HYPRE_Int            *dof_func,
                            HYPRE_Int             debug_flag,
                            HYPRE_Real            trunc_factor,
                            HYPRE_Int             max_elmts,
                            hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd = NULL;

   HYPRE_Int       *CF_marker_offd = NULL;
   HYPRE_Int       *dof_func_offd = NULL;

   /* Ghost rows of A (rows owned by other ranks that this rank's
    * off-diagonal columns refer to), fetched only when num_procs > 1. */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real      *A_ext_data = NULL;
   HYPRE_Int       *A_ext_i = NULL;
   HYPRE_BigInt    *A_ext_j = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;

   HYPRE_Int        P_diag_size, P_offd_size;

   /* Row-local scratch: P_marker[i1] holds the position of column i1 in the
    * current row of P (>= jj_begin_row), or strong_f_marker for strong
    * F-neighbors, or a stale value from previous rows otherwise. */
   HYPRE_Int       *P_marker, *P_marker_offd;

   HYPRE_Int        jj_counter,jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;
   HYPRE_Int        jj_begin_row,jj_begin_row_offd;
   HYPRE_Int        jj_end_row,jj_end_row_offd;

   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int        strong_f_marker;

   HYPRE_Int       *fine_to_coarse;
   //HYPRE_Int      *fine_to_coarse_offd;
   HYPRE_Int       *coarse_counter;
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   //HYPRE_BigInt   my_first_cpt;
   HYPRE_Int        num_cols_P_offd;

   HYPRE_Int        i,i1,i2;
   HYPRE_Int        j,jl,jj,jj1;
   HYPRE_Int        kc;
   HYPRE_BigInt     big_k;
   HYPRE_Int        start;
   HYPRE_Int        sgn;
   HYPRE_Int        c_num;

   HYPRE_Real       diagonal;
   HYPRE_Real       sum;
   HYPRE_Real       distribute;

   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one  = 1.0;

   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;
   HYPRE_Int        print_level = 0;
   HYPRE_Int       *int_buf_data;

   /* [col_1, col_n) is this rank's owned global column range; global column
    * indices in A_ext inside this range map to local diagonal columns. */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;

   HYPRE_Real       wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   //my_first_cpt = num_cpts_global[0];
   /* Only the last rank knows the total coarse count; broadcast it. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   /* A negative debug_flag turns on the zero-diagonal warning below. */
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
      print_level = 1;
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   /* Pack and exchange CF_marker for the ghost (off-processor) columns. */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* Same exchange for dof_func in the multi-function (systems) case. */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      A_ext      = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i    = hypre_CSRMatrixI(A_ext);
      A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }

   /* Compress A_ext in place, translating global column indices:
    * owned columns become local indices in [0, n_fine); recognized ghost
    * columns become the negative encoding -kc-1 (so kc can be recovered as
    * -i2-1 later); unrecognized columns are dropped.
    * Note: when num_procs == 1 this loop body never executes because
    * num_cols_A_offd is 0, so the NULL A_ext_i is never dereferenced. */
   index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         big_k = A_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            A_ext_j[index] = big_k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
            if (kc > -1)
            {
               A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   /* Convert the row-end markers written above back into CSR row starts. */
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];
   if (num_procs > 1) A_ext_i[0] = 0;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get A_ext =  %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    *  Loop over fine grid: each thread counts the P nonzeros and the
    *  coarse points in its contiguous slice of rows [ns, ne).
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* Split n_fine rows into num_threads nearly-equal slices; the first
       * `rest` slices get one extra row. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;

      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity. Also set up
          *  mapping vector (thread-local coarse index; shifted globally in
          *  the prefix-sum pass below).
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    *  Allocate arrays (after prefix-summing the per-thread counts).
    *-----------------------------------------------------------------------*/

   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    *  Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   /* Shift each thread's local coarse numbering by the number of coarse
    * points found in all earlier slices, making fine_to_coarse globally
    * consistent on this rank. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
      //fine_to_coarse[i] += my_first_cpt+coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   #endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */

   /*-----------------------------------------------------------------------
    *  Second Pass: loop over fine grid points and compute the actual
    *  interpolation weights.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      /* Same row partition as the first pass, so each thread starts writing
       * at the offset jj_count[jl-1] counted for the preceding slices. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      /* strong_f_marker is decremented after every row so that a stale
       * marker from a previous row can never be mistaken for the current
       * row's strong-F tag. */
      strong_f_marker = -2;

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/

         else
         {
            /* Diagonal part of P: one entry per strong C-neighbor of i. */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }

               /*--------------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                * (CF_marker == -3 marks F-points excluded from distribution.)
                *--------------------------------------------------------------*/

               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/

                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     /* Store the A-offd column index for now; it is remapped
                      * to the compressed P-offd numbering at the end. */
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }

                  /*-----------------------------------------------------------
                   * If neighbor i1 is an F-point, mark it as a strong F-point
                   * whose connection needs to be distributed.
                   *-----------------------------------------------------------*/

                  else if (CF_marker_offd[i1] != -3)
                  {
                     P_marker_offd[i1] = strong_f_marker;
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;

            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A.  First, the diagonal part of A */

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/

               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }

               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly influence i.
                * Note: currently no distribution to the diagonal in this case.
                *--------------------------------------------------------------*/

               else if (P_marker[i1] == strong_f_marker)
               {
                  sum = zero;

                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   * Only entries whose sign opposes i1's own diagonal (sgn)
                   * are included.
                   *-----------------------------------------------------------*/
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                     {
                        sum += A_diag_data[jj1];
                     }
                  }

                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                        {
                           sum += A_offd_data[jj1];
                        }
                     }
                  }

                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;

                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/

                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                        {
                           P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1];
                        }
                     }

                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                           {
                              P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     /* Nothing to distribute to: lump a_{i,i1} into the
                      * diagonal (same-function dofs only). */
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     {
                        diagonal += A_diag_data[jj];
                     }
                  }
               }

               /*--------------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal.
                *--------------------------------------------------------------*/

               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                  {
                     diagonal += A_diag_data[jj];
                  }
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/

                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }

                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and strongly influences i,
                   * distribute a_{i,i1} to C-points that strongly influence i.
                   * Note: currently no distribution to the diagonal in this case.
                   *-----------------------------------------------------------*/

                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;

                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/

                     /* find row number: i1's row lives in A_ext, whose column
                      * indices were compressed above (>= 0 -> diag block,
                      * negative -> offd block encoded as -i2-1). */
                     c_num = A_offd_j[jj];

                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        i2 = (HYPRE_Int)A_ext_j[jj1];
                        if (i2 > -1)
                        {
                           /* in the diagonal block */
                           if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           /* in the off_diagonal block  */
                           if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0)
                           {
                              sum += A_ext_data[jj1];
                           }
                        }
                     }

                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;

                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/

                        /* Diagonal block part of row i1 */

                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                        {
                           i2 = (HYPRE_Int)A_ext_j[jj1];
                           if (i2 > -1) /* in the diagonal block */
                           {
                              if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block  */
                              if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0)
                                 P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1];
                           }
                        }
                     }
                     else
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        {
                           diagonal += A_offd_data[jj];
                        }
                     }
                  }

                  /*-----------------------------------------------------------
                   * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                   * into the diagonal.
                   *-----------------------------------------------------------*/

                  else if (CF_marker_offd[i1] != -3)
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     {
                        diagonal += A_offd_data[jj];
                     }
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            if (diagonal == 0.0)
            {
               if (print_level)
               {
                  hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
               }
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] = 0.0;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] = 0.0;
               }
            }
            else
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }

         /* New tag for the next row; see note at initialization above. */
         strong_f_marker--;

         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate the CSR arrays; refresh local pointers. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Compress the off-diagonal column space of P: find the A-offd columns
    * actually used, build col_map_offd_P / tmp_map_offd, and remap P_offd_j
    * into the compressed numbering. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
      {
         P_marker[i] = 0;
      }

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Restore the special marker -3 to a plain F-point marker for callers. */
   for (i=0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3) CF_marker[i] = -1;
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpHE
 * interpolation routine for hyperbolic PDEs
 * treats weak fine connections like strong fine connections
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int 
n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_Int *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int kc; HYPRE_BigInt big_k; HYPRE_Int start; HYPRE_Int sgn; HYPRE_Int c_num; HYPRE_Real diagonal; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_A_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of A *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); } index = 0; for (i=0; i < num_cols_A_offd; i++) { for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++) { big_k = A_ext_j[j]; if (big_k >= col_1 && big_k < col_n) { A_ext_j[index] = big_k - col_1; A_ext_data[index++] = A_ext_data[j]; } else { kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd); if (kc > -1) { A_ext_j[index] = (HYPRE_BigInt)(-kc-1); A_ext_data[index++] = A_ext_data[j]; } } } A_ext_i[i] = index; } for (i = num_cols_A_offd; i > 0; i--) A_ext_i[i] = A_ext_i[i-1]; if (num_procs > 1) A_ext_i[0] = 0; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n", my_id, wall_time); fflush(NULL); } 
/*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. 
*--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); }*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*#ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/ /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; /* Loop over ith row of A. First, the diagonal part of A */ for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and influences i, * distribute a_{i,i1} to C-points that strongly influence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else { sum = zero; /*----------------------------------------------------------- * Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*-----------------------------------------------------------*/ sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { sum += A_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { sum += A_offd_data[jj1]; } } } if (sum != 0) { distribute = A_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of A for point i1 and do the distribution. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1]; } } } } else { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } } /*---------------------------------------------------------------- * Still looping over ith row of A. Next, loop over the * off-diagonal part of A *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. 
*--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *-----------------------------------------------------------*/ else { sum = zero; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *---------------------------------------------------------*/ /* find row number */ c_num = A_offd_j[jj]; sgn = 1; if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1; for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (i2 > -1) { /* in the diagonal block */ if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } } if (sum != 0) { distribute = A_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and do * the distribution. 
*--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (i2 > -1) /* in the diagonal block */ { if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1]; } } } else { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= -diagonal; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= -diagonal; } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = 
hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(A_ext); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildDirInterp *--------------------------------------------------------------------------*/ 
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildDirInterpHost
 *
 * Host (CPU/OpenMP) construction of a "direct" interpolation operator P for
 * BoomerAMG.  Each C-point row of P is the identity; each F-point row
 * interpolates from the C-points that strongly influence it (entries of S).
 * The weights are the corresponding entries of A, scaled separately for
 * positive and negative couplings (alfa/beta below) so that the row sums of
 * same-sign neighbor couplings are reproduced, then divided by the diagonal.
 *
 * A               - fine-grid system matrix (ParCSR)
 * CF_marker       - C/F splitting per local fine point (>= 0: C-point,
 *                   < 0: F-point; -3 entries are reset to -1 on exit)
 * S               - strength-of-connection matrix (same row pattern family as A)
 * num_cpts_global - [first, upper) global coarse-point range for this rank;
 *                   entry [1] on the last rank is broadcast as the global
 *                   number of coarse points
 * num_functions   - number of unknowns per node; with dof_func it restricts
 *                   sums to couplings within the same unknown
 * dof_func        - function (unknown) index per fine point
 * debug_flag      - ==4 enables timing printout
 * trunc_factor    - relative drop tolerance applied by truncation (0.0 = none)
 * max_elmts       - max kept entries per row of P (0 = no limit)
 * P_ptr           - output: the assembled ParCSR interpolation matrix
 *
 * Returns hypre_error_flag.
 *
 * Parallel structure: a two-pass algorithm.  Pass 1 counts per-thread row
 * sizes (jj_count/jj_count_offd) and assigns per-thread local coarse indices
 * (coarse_counter); a serial prefix sum then turns the per-thread tallies
 * into global offsets.  Pass 2 re-walks the same ownership ranges and fills
 * P's CSR arrays at the precomputed offsets.
 *
 * NOTE(review): pass 1 counts F-row neighbors with CF_marker[i1] > 0, but
 * pass 2 inserts them with CF_marker[i1] >= 0.  If CF_marker can ever be 0
 * the fill pass would write more entries than were allocated — presumably
 * CF_marker is only ever 1/-1/-3 here; confirm against the coarsening code.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterpHost( hypre_ParCSRMatrix   *A,
                                   HYPRE_Int            *CF_marker,
                                   hypre_ParCSRMatrix   *S,
                                   HYPRE_BigInt         *num_cpts_global,
                                   HYPRE_Int             num_functions,
                                   HYPRE_Int            *dof_func,
                                   HYPRE_Int             debug_flag,
                                   HYPRE_Real            trunc_factor,
                                   HYPRE_Int             max_elmts,
                                   hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* Local (diag) and off-processor (offd) CSR pieces of A. */
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j    = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);

   /* Strength matrix pieces; only the pattern (i/j) is used, never values. */
   hypre_CSRMatrix *S_diag   = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd   = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd = NULL;

   HYPRE_Int *CF_marker_offd = NULL;   /* C/F flags of off-processor columns */
   HYPRE_Int *dof_func_offd  = NULL;   /* function indices of off-proc columns */

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data;
   HYPRE_Int  *P_diag_i;
   HYPRE_Int  *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int  *P_offd_i;
   HYPRE_Int  *P_offd_j;

   HYPRE_Int P_diag_size, P_offd_size;

   HYPRE_Int  jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;   /* per-thread nnz tallies */
   HYPRE_Int  jj_begin_row, jj_begin_row_offd;
   HYPRE_Int  jj_end_row, jj_end_row_offd;

   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int  n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int *fine_to_coarse;    /* local fine index -> local coarse index */
   HYPRE_Int *coarse_counter;    /* per-thread coarse-point tallies */
   HYPRE_Int  coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int  num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;

   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;

   HYPRE_Real diagonal;
   HYPRE_Real sum_N_pos, sum_P_pos;  /* row sums of positive couplings: all / kept */
   HYPRE_Real sum_N_neg, sum_P_neg;  /* row sums of negative couplings: all / kept */
   HYPRE_Real alfa = 1.0;            /* scaling for negative P entries */
   HYPRE_Real beta = 1.0;            /* scaling for positive P entries */

   HYPRE_Real zero = 0.0;
   HYPRE_Real one  = 1.0;

   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;     /* per-thread ownership range [ns, ne) */
   HYPRE_Int *int_buf_data;

   HYPRE_Real wall_time;  /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   //my_first_cpt = num_cpts_global[0];
   /* The last rank knows the global coarse-point total; share it. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      /* Lazily build A's communication package if it does not exist yet. */
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   /* Pack and exchange CF_marker for the columns other ranks need from us. */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (num_functions > 1)
   {
      /* Same exchange for the per-point function (unknown) indices. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* Split the n_fine rows into num_threads nearly equal chunks; the
         first `rest` threads get one extra row. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;

      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector (thread-local coarse numbering; shifted below).
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] > 0)   /* NOTE(review): fill pass tests >= 0 */
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] > 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.  Serial prefix sum over the per-thread tallies turns
    * them into cumulative offsets; the last entry is the total nnz.
    *-----------------------------------------------------------------------*/

   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   /* NOTE(review): P's CSR arrays are allocated with HYPRE_MEMORY_DEVICE yet
      written directly by host code below — presumably this build maps DEVICE
      to host-accessible (e.g. unified) memory; confirm for pure-GPU builds. */
   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   /* Shift each thread's local coarse numbering by the number of coarse
      points owned by lower-numbered threads, giving process-local indices. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   #endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points (second pass: fill P).
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      HYPRE_Int *P_marker, *P_marker_offd;

      /* Identical row partitioning as in pass 1, so the precomputed offsets
         jj_count[jl-1] / jj_count_offd[jl-1] are where this thread writes. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      /* P_marker maps a column index to its position in the current row of
         P (>= jj_begin_row) or -1 / a stale value from an earlier row. */
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/

         else
         {
            /* Diagonal part of P: reserve a slot (weight zero for now) for
               each strongly-influencing local C-neighbor. */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/

                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd]    = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;

            /* First stored entry of row i of A_diag is the diagonal a_{ii}. */
            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A. First, the diagonal part of A.
               sum_N_* accumulate all same-function couplings by sign;
               sum_P_* accumulate only those kept in P. */
            sum_N_pos = 0;
            sum_N_neg = 0;
            sum_P_pos = 0;
            sum_P_neg = 0;

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (num_functions == 1 || dof_func[i1] == dof_func[i])
               {
                  if (A_diag_data[jj] > 0)
                     sum_N_pos += A_diag_data[jj];
                  else
                     sum_N_neg += A_diag_data[jj];
               }

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/

               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
                  if (A_diag_data[jj] > 0)
                     sum_P_pos += A_diag_data[jj];
                  else
                     sum_P_neg += A_diag_data[jj];
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
                  {
                     if (A_offd_data[jj] > 0)
                        sum_N_pos += A_offd_data[jj];
                     else
                        sum_N_neg += A_offd_data[jj];
                  }

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/

                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                     if (A_offd_data[jj] > 0)
                        sum_P_pos += A_offd_data[jj];
                     else
                        sum_P_neg += A_offd_data[jj];
                  }
               }
            }

            /* Scale so kept couplings reproduce the full same-sign row sums.
               NOTE(review): if sum_P_neg (or sum_P_pos) is zero, alfa (beta)
               keeps its value from a previous row of this thread (initially
               1.0) — looks intentional as a fallback, but verify. */
            if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
            if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               if (P_diag_data[jj]> 0)
                  P_diag_data[jj] *= -beta;
               else
                  P_diag_data[jj] *= -alfa;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               if (P_offd_data[jj]> 0)
                  P_offd_data[jj] *= -beta;
               else
                  P_offd_data[jj] *= -alfa;
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   /* Assemble the ParCSR matrix object around the filled CSR arrays. */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max;
      refresh local pointers since truncation reallocates the CSR arrays. */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Compress P_offd's column space: renumber P_offd_j to the set of
      off-processor columns actually referenced by P. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      HYPRE_Int *P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[k] = k-th used old column index (ascending). */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
      {
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Side effect on input: special F-points (-3) are folded back to -1. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildDirInterp
 *
 * Public entry point for direct interpolation: dispatches to the device
 * implementation when A lives in device memory (CUDA/HIP builds), otherwise
 * to the host implementation above.  Note interp_type is only forwarded to
 * the device path.  Returns the callee's error code.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix   *A,
                               HYPRE_Int            *CF_marker,
                               hypre_ParCSRMatrix   *S,
                               HYPRE_BigInt         *num_cpts_global,
                               HYPRE_Int             num_functions,
                               HYPRE_Int            *dof_func,
                               HYPRE_Int             debug_flag,
                               HYPRE_Real            trunc_factor,
                               HYPRE_Int             max_elmts,
                               HYPRE_Int             interp_type,
                               hypre_ParCSRMatrix  **P_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("DirInterp");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGBuildDirInterpDevice(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
                                                 debug_flag,trunc_factor,max_elmts,
                                                 interp_type, P_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildDirInterpHost(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
                                               debug_flag,trunc_factor,max_elmts,
                                               P_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*------------------------------------------------
 * Drop entries in interpolation matrix P:
 * entries below trunc_factor times the row maximum
 * are removed and rows are capped at max_elmts.
 * max_elmts == 0 means no limit on rownnz.
 * No-op when both controls are disabled.
 *------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 HYPRE_Real          trunc_factor,
                                 HYPRE_Int           max_elmts)
{
   if (trunc_factor <= 0.0 && max_elmts == 0)
   {
      return 0;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(P) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_BoomerAMGInterpTruncationDevice(P, trunc_factor, max_elmts);
   }
   else
#endif
   {
      HYPRE_Int rescale  = 1; // rescale P so truncated rows keep their row sums
      HYPRE_Int nrm_type = 0; // use infinity-norm of the row for threshold dropping
      return hypre_ParCSRMatrixTruncate(P, trunc_factor, max_elmts, rescale, nrm_type);
   }
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the
 * unknown approach.
 * here we need to pass in a strength matrix built on the entire matrix.
 *
 * Builds the interpolation operator P for BoomerAMG, restricting the
 * distribution of strong-F connections to unknowns of the same function
 * type (dof_func).  Interface mirrors hypre_BoomerAMGBuildInterp.
 *
 *   A               [in]  fine-grid operator (ParCSR)
 *   CF_marker       [in]  C/F splitting (>= 0: C-point, < 0: F-point, -3 special)
 *   S               [in]  strength matrix built on the ENTIRE matrix
 *   num_cpts_global [in]  first/last global coarse point of this rank
 *   num_functions   [in]  number of unknown functions (system AMG)
 *   dof_func        [in]  function type of each fine dof
 *   debug_flag      [in]  timing/diagnostic level (negative enables warnings)
 *   trunc_factor    [in]  relative dropping threshold for P
 *   max_elmts       [in]  cap on nonzeros per row of P (0 = no cap)
 *   P_ptr           [out] resulting interpolation matrix
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                            hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                                            HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                                            HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                                            hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* Views into the diag (on-process) and offd (off-process) parts of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   /* Strength matrix parts (pattern only; no data needed) */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd;

   /* Ghost copies of CF_marker / dof_func for off-process columns */
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;

   /* Ghost rows of A (external rows needed for distributing offd connections) */
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data = NULL;
   HYPRE_Int *A_ext_i = NULL;
   HYPRE_BigInt *A_ext_j = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;

   /* Per-row scratch markers (one pair per thread in pass 2) */
   HYPRE_Int *P_marker, *P_marker_offd;

   /* Running nonzero counters and per-thread counts */
   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   HYPRE_Int jj_begin_row,jj_begin_row_offd;
   HYPRE_Int
   jj_end_row,jj_end_row_offd;

   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int strong_f_marker;    /* sentinel for strong-F neighbors (decremented per row) */
   HYPRE_Int *fine_to_coarse;    /* fine index -> local coarse index, -1 for F-points */
   //HYPRE_Int *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;    /* per-thread count of coarse points */
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int i,i1,i2;
   HYPRE_Int j,jl,jj,jj1;
   HYPRE_Int kc;
   HYPRE_BigInt big_k;
   HYPRE_Int start;
   HYPRE_Int sgn;
   HYPRE_Int c_num;
   HYPRE_Real diagonal;
   HYPRE_Real sum;
   HYPRE_Real distribute;
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;  /* per-thread row-range partition */
   HYPRE_Int print_level = 0;
   HYPRE_Int *int_buf_data;

   /* Global column range owned by this rank: [col_1, col_n) */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + local_numrows;

   HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();
   //my_first_cpt = num_cpts_global[0];
   /* Last rank knows the global number of coarse points; broadcast it */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   /* Negative debug_flag additionally enables zero-diagonal warnings */
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
      print_level = 1;
   }
   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data =
hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   /* Pack and exchange CF_marker for the columns this rank must send */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   /* 11 = integer exchange; fills CF_marker_offd for ghost columns */
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* Same exchange for the function types when running system AMG */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of A
    *---------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
   }

   /* Compress A_ext in place, translating global columns to local indices:
    * columns owned by this rank become [0, n_fine); ghost columns found in
    * col_map_offd are encoded as -kc-1; all other columns are dropped. */
   index = 0;
   for (i=0; i < num_cols_A_offd; i++)
   {
      for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
      {
         big_k = A_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            /* on-process column: store local diag index */
            A_ext_j[index] = big_k - col_1;
            A_ext_data[index++] = A_ext_data[j];
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
            if (kc > -1)
            {
               /* ghost column: store negative encoding -kc-1 */
               A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
               A_ext_data[index++] = A_ext_data[j];
            }
         }
      }
      A_ext_i[i] = index;
   }
   /* Shift row pointers so A_ext_i again marks row starts */
   for (i = num_cols_A_offd; i > 0; i--)
      A_ext_i[i] = A_ext_i[i-1];
   if (num_procs > 1) A_ext_i[0] = 0;

   if
   (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get A_ext =  %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid: each thread counts nonzeros of P for its
    * contiguous slice of rows [ns, ne).
    *-----------------------------------------------------------------------*/

/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* Split n_fine rows as evenly as possible across threads */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i: one P entry per strong C-neighbor.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays: prefix-sum the per-thread counts so each thread
    * knows its offset, then size P from the totals.
    *-----------------------------------------------------------------------*/
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   /* Convert per-thread coarse indices to process-local coarse indices by
    * adding each thread's cumulative coarse-point offset */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   /* Second pass: fill in the entries of P.  Each thread re-derives its row
    * slice and starts from the prefix-summed nonzero offsets of pass 1. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      /* P_marker[i1] records where neighbor i1's P entry lives, or the
       * strong-F sentinel; -1 means untouched for the current row */
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      /* decremented after every row so stale marks never match */
      strong_f_marker = -2;

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
               /*--------------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *--------------------------------------------------------------*/
               else if (CF_marker[i1] != -3)
               {
                  P_marker[i1] = strong_f_marker;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
               {
                  i1 = S_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     /* store local offd column; compressed to P numbering later */
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
                  /*-----------------------------------------------------------
                   * If neighbor i1 is an F-point, mark it as a strong F-point
                   * whose connection needs to be distributed.
                   *-----------------------------------------------------------*/
                  else if (CF_marker_offd[i1] != -3)
                  {
                     P_marker_offd[i1] = strong_f_marker;
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;

            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A.  First, the diagonal part of A */
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
               }
               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly infuence i.
                * Note: currently no distribution to the diagonal in this case.
                * HERE, we only want to distribute to points of the SAME
                * function type.
                *--------------------------------------------------------------*/
               else if (P_marker[i1] == strong_f_marker)
               {
                  sum = zero;

                  /*-----------------------------------------------------------
                   * Loop over row of A for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   * Only entries whose sign opposes i1's diagonal are used.
                   *-----------------------------------------------------------*/
                  sgn = 1;
                  if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
                  /* Diagonal block part of row i1 */
                  for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                     {
                        if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0 )
                        {
                           sum += A_diag_data[jj1];
                        }
                     }
                  }
                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                           {
                              sum += A_offd_data[jj1];
                           }
                        }
                     }
                  }

                  if (sum != 0)
                  {
                     distribute = A_diag_data[jj] / sum;

                     /*-----------------------------------------------------------
                      * Loop over row of A for point i1 and do the distribution.
                      *-----------------------------------------------------------*/
                     /* Diagonal block part of row i1 */
                     for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                     {
                        i2 = A_diag_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                           {
                              P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1];
                           }
                        }
                     }
                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                        {
                           i2 = A_offd_j[jj1];
                           if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                           {
                              if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                              {
                                 P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1];
                              }
                           }
                        }
                     }
                  }
                  else /* sum = 0 - only add to diag if the same function type */
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func[i1])
                        diagonal += A_diag_data[jj];
                  }
               }
               /*--------------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal. (only if the same function type)
                *--------------------------------------------------------------*/
               else if (CF_marker[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func[i1])
                     diagonal += A_diag_data[jj];
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                  }
                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and strongly influences i,
                   * distribute a_{i,i1} to C-points that strongly infuence i.
                   * Note: currently no distribution to the diagonal in this case.
                   * AGAIN, we only want to distribute to points of the SAME
                   * function type.
                   *-----------------------------------------------------------*/
                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;

                     /*---------------------------------------------------------
                      * Loop over row of A_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      * A_ext columns: >= 0 is a diag index, negative encodes the
                      * offd index as -i2-1 (see the A_ext compression above).
                      *---------------------------------------------------------*/

                     /* find row number */
                     c_num = A_offd_j[jj];
                     sgn = 1;
                     if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
                     for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                     {
                        i2 = (HYPRE_Int)A_ext_j[jj1];
                        if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                        {
                           if (i2 > -1)
                           {
                              /* in the diagonal block */
                              if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 sum += A_ext_data[jj1];
                              }
                           }
                           else
                           {
                              /* in the off_diagonal block */
                              if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0)
                              {
                                 sum += A_ext_data[jj1];
                              }
                           }
                        }
                     }

                     if (sum != 0)
                     {
                        distribute = A_offd_data[jj] / sum;

                        /*---------------------------------------------------------
                         * Loop over row of A_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/
                        /* Diagonal block part of row i1 */
                        for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
                        {
                           i2 = (HYPRE_Int)A_ext_j[jj1];
                           if (num_functions == 1 || dof_func[i1] == dof_func[i2])
                           {
                              if (i2 > -1) /* in the diagonal block */
                              {
                                 if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0)
                                 {
                                    P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1];
                                 }
                              }
                              else
                              {
                                 /* in the off_diagonal block */
                                 if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0)
                                    P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1];
                              }
                           }
                        }
                     }
                     else /* sum = 0 */
                     {
                        if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                           diagonal += A_offd_data[jj];
                     }
                  }
                  /*-----------------------------------------------------------
                   * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                   * into the diagonal.
                   *-----------------------------------------------------------*/
                  else if (CF_marker_offd[i1] != -3)
                  {
                     if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                        diagonal += A_offd_data[jj];
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/
            if (diagonal == 0.0)
            {
               /* Degenerate row: zero out its weights rather than divide by 0 */
               if (print_level) hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] = 0.0;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] = 0.0;
               }
            }
            else
            {
               for (jj = jj_begin_row; jj < jj_end_row; jj++)
               {
                  P_diag_data[jj] /= -diagonal;
               }
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  P_offd_data[jj] /= -diagonal;
               }
            }
         }

         strong_f_marker--;

         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   /* Assemble the ParCSR matrix P from the filled arrays */
   P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0,
                                P_diag_i[n_fine], P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max;
    * truncation may reallocate, so re-fetch all array pointers */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Compress the offd column space of P to only the columns actually used */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd,
      HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[k] = k-th used old offd column, in increasing order */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

      /* Renumber P_offd_j from old offd indices to compressed indices */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Reset special marker -3 back to plain F-point */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGTruncandBuild
 *
 * Truncates P (threshold trunc_factor, row cap max_elmts), compresses its
 * offd column map to the columns still in use, and rebuilds P's CommPkg.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P,
                              HYPRE_Real trunc_factor,
                              HYPRE_Int max_elmts)
{
   hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_ParCSRCommPkg *commpkg_P = hypre_ParCSRMatrixCommPkg(P);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(P);
   HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
   HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(P_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_offd);
   HYPRE_BigInt *new_col_map_offd;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int P_offd_size=0, new_num_cols_offd;
   HYPRE_Int *P_marker;
   HYPRE_Int i;
   HYPRE_Int index;

   /* Compress P, removing coefficients smaller than trunc_factor * Max;
    * truncation may reallocate, so re-fetch the offd array pointers */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_size = P_offd_i[n_fine];
   }

   /* Count and renumber the offd columns still referenced after truncation */
   new_num_cols_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);

/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
      for (i=0; i < num_cols_offd; i++)
         P_marker[i] = 0;

      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            new_num_cols_offd++;
            P_marker[index] = 1;
         }
      }

      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
      new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_num_cols_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[k] = k-th used old offd column, increasing order */
      index = 0;
      for (i=0; i < new_num_cols_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], new_num_cols_offd);
   }

   /* Build the new (global) column map from the surviving columns.
    * NOTE(review): this loop reuses P_marker/new_col_map_offd, which are only
    * allocated when P_offd_size != 0; it is a no-op otherwise because
    * new_num_cols_offd stays 0. */
   index = 0;
   for (i = 0; i < new_num_cols_offd; i++)
   {
      while (P_marker[index] == 0) index++;
      new_col_map_offd[i] = col_map_offd[index];
      index++;
   }

   if (P_offd_size) hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (new_num_cols_offd)
   {
      hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(col_map_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
      hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd;
   }

   /* CommPkg refers to the old column map: rebuild it */
   if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
   hypre_MatvecCommPkgCreate(P);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_CreateC
 *
 * Builds C = I - w * D^{-1} A (weighted-Jacobi style smoothing operator);
 * when w == 0 each row is scaled by its own absolute row sum instead.
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A,
                                   HYPRE_Real w)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j =
hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);

   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix *C_diag;
   hypre_CSRMatrix *C_offd;
   HYPRE_Real *C_diag_data;
   HYPRE_Int *C_diag_i;
   HYPRE_Int *C_diag_j;
   HYPRE_Real *C_offd_data;
   HYPRE_Int *C_offd_i;
   HYPRE_Int *C_offd_j;
   HYPRE_BigInt *col_map_offd_C;

   HYPRE_Int i, j, index;
   HYPRE_Real invdiag;          /* scaling applied to off-diagonal entries */
   HYPRE_Real w_local = w;      /* per-row weight (row sum) when w == 0 */

   /* C has exactly A's sparsity pattern */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts,
                                row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]);

   hypre_ParCSRMatrixInitialize(C);

   C_diag = hypre_ParCSRMatrixDiag(C);
   C_offd = hypre_ParCSRMatrixOffd(C);

   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);

   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);

   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;

   for (i=0; i < num_cols_offd; i++)
      col_map_offd_C[i] = col_map_offd_A[i];

   for (i=0; i < num_rows; i++)
   {
      /* first diag entry of each row is the diagonal element */
      index = A_diag_i[i];
      invdiag = -w/A_diag_data[index];
      C_diag_data[index] = 1.0-w;
      C_diag_j[index] = A_diag_j[index];
      if (w == 0)
      {
         /* w == 0: scale by the absolute row sum instead of the diagonal */
         w_local = fabs(A_diag_data[index]);
         for (j = index+1; j < A_diag_i[i+1]; j++)
            w_local += fabs(A_diag_data[j]);
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
            w_local += fabs(A_offd_data[j]);
         invdiag = -1/w_local;
         C_diag_data[index] = 1.0-A_diag_data[index]/w_local;
      }
      C_diag_i[i] = index;
      C_offd_i[i] =
      A_offd_i[i];
      for (j = index+1; j < A_diag_i[i+1]; j++)
      {
         C_diag_data[j] = A_diag_data[j]*invdiag;
         C_diag_j[j] = A_diag_j[j];
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         C_offd_data[j] = A_offd_data[j]*invdiag;
         C_offd_j[j] = A_offd_j[j];
      }
   }
   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];

   return C;
}

/* RL */
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpOnePnt
 *
 * One-point interpolation: each F-point interpolates from the single most
 * strongly influencing C-point (largest |a_ij| among strong C-neighbors).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpOnePnt( hypre_ParCSRMatrix  *A,
                                  HYPRE_Int           *CF_marker,
                                  hypre_ParCSRMatrix  *S,
                                  HYPRE_BigInt        *num_cpts_global,
                                  HYPRE_Int            num_functions,
                                  HYPRE_Int           *dof_func,
                                  HYPRE_Int            debug_flag,
                                  hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   /* csr's */
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   /* arrays */
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int num_cols_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_BigInt *col_map_offd_P = NULL;
   /* CF marker off-diag part */
   HYPRE_Int *CF_marker_offd = NULL;
   /* func type
off-diag part */
   HYPRE_Int *dof_func_offd = NULL;
   /* nnz */
   HYPRE_Int nnz_diag, nnz_offd, cnt_diag, cnt_offd;
   HYPRE_Int *marker_diag, *marker_offd = NULL;
   /* local size */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* number of C-pts */
   HYPRE_Int n_cpts = 0;
   /* fine to coarse mapping: diag part and offd part */
   HYPRE_Int *fine_to_coarse;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;
   HYPRE_BigInt total_global_cpts, my_first_cpt;
   HYPRE_Int my_id, num_procs;
   HYPRE_Int num_sends;
   HYPRE_Int *int_buf_data = NULL;
   HYPRE_BigInt *big_int_buf_data = NULL;
   //HYPRE_Int col_start = hypre_ParCSRMatrixFirstRowIndex(A);
   //HYPRE_Int col_end = col_start + n_fine;
   HYPRE_Int i, j, i1, j1, k1, index, start;
   /* chosen C-pt (local diag or offd index) per fine row */
   HYPRE_Int *max_abs_cij;
   /* per-row flag: 'd' = best C-pt in diag, 'o' = in offd, 'n' = none found */
   char *max_abs_diag_offd;
   HYPRE_Real max_abs_aij, vv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   my_first_cpt = num_cpts_global[0];
   /* Last rank knows the global number of coarse points; broadcast it */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   /* CF marker for the off-diag columns */
   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* function type indicator for the off-diag columns */
   if (num_functions > 1 && num_cols_A_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* if CommPkg of A is not present, create it */
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* number of sends to do (number of procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* send buffer, of size send_map_starts[num_sends]),
    * i.e., number of entries to send */
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
   /* copy CF
markers of elements to send to buffer
    * RL: why copy them with two for loops? Why not just loop through all in one */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      /* loop through all elems to send_proc[i] */
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* CF marker of send_map_elemts[j] */
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* do a similar communication for dof_func */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data,HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping,
    * and find the most strongly influencing C-pt for each F-pt
    *-----------------------------------------------------------------------*/
   /* nnz in diag and offd parts */
   cnt_diag = 0;
   cnt_offd = 0;
   max_abs_cij = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   max_abs_diag_offd = hypre_CTAlloc(char, n_fine,HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   /* markers initialized as zeros */
   marker_diag = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine;
i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { //fine_to_coarse[i] = my_first_cpt + n_cpts; fine_to_coarse[i] = n_cpts; n_cpts++; continue; } /* mark all the strong connections: in S */ HYPRE_Int MARK = i + 1; /* loop through row i of S, diag part */ for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++) { marker_diag[S_diag_j[j]] = MARK; } /* loop through row i of S, offd part */ if (num_procs > 1) { for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { j1 = S_offd_j[j]; marker_offd[j1] = MARK; } } fine_to_coarse[i] = -1; /*--------------------------------------------------------------------------- * If i is an F-pt, interpolation is from the most strongly influencing C-pt * Find this C-pt and save it *--------------------------------------------------------------------------*/ /* if we failed to find any strong C-pt, mark this point as an 'n' */ char marker = 'n'; /* max abs val */ max_abs_aij = -1.0; /* loop through row i of A, diag part */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { i1 = A_diag_j[j]; vv = fabs(A_diag_data[j]); #if 0 /* !!! this is a hack just for code verification purpose !!! it basically says: 1. if we see |a_ij| < 1e-14, force it to be 1e-14 2. if we see |a_ij| == the max(|a_ij|) so far exactly, replace it if the j idx is smaller Reasons: 1. numerical round-off for eps-level values 2. entries in CSR rows may be listed in different orders */ vv = vv < 1e-14 ? 
1e-14 : vv; if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv == max_abs_aij && i1 < max_abs_cij[i]) { /* mark it as a 'd' */ marker = 'd'; max_abs_cij[i] = i1; max_abs_aij = vv; continue; } #endif /* it is a strong C-pt and has abs val larger than what have seen */ if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv > max_abs_aij) { /* mark it as a 'd' */ marker = 'd'; max_abs_cij[i] = i1; max_abs_aij = vv; } } /* offd part */ if (num_procs > 1) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { i1 = A_offd_j[j]; vv = fabs(A_offd_data[j]); if (CF_marker_offd[i1] >= 0 && marker_offd[i1] == MARK && vv > max_abs_aij) { /* mark it as an 'o' */ marker = 'o'; max_abs_cij[i] = i1; max_abs_aij = vv; } } } max_abs_diag_offd[i] = marker; if (marker == 'd') { cnt_diag ++; } else if (marker == 'o') { cnt_offd ++; } } nnz_diag = cnt_diag + n_cpts; nnz_offd = cnt_offd; /*------------- allocate arrays */ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag,HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, nnz_diag,HYPRE_MEMORY_DEVICE); /* not in ``if num_procs > 1'', * allocation needed even for empty CSR */ P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd,HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, nnz_offd,HYPRE_MEMORY_DEVICE); /* redundant */ P_diag_i[0] = 0; P_offd_i[0] = 0; /* reset counters */ cnt_diag = 0; cnt_offd = 0; /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd,HYPRE_MEMORY_HOST); big_int_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { big_int_buf_data[index++] = my_first_cpt +(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * Second Pass: Populate P *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { if (CF_marker[i] >= 0) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. 
*--------------------------------------------------------------------*/ //P_diag_j[cnt_diag] = fine_to_coarse[i] - my_first_cpt; P_diag_j[cnt_diag] = fine_to_coarse[i]; P_diag_data[cnt_diag++] = 1.0; } else { /*--------------------------------------------------------------------------- * If i is an F-pt, interpolation is from the most strongly influencing C-pt *--------------------------------------------------------------------------*/ if (max_abs_diag_offd[i] == 'd') { /* on diag part of P */ j = max_abs_cij[i]; //P_diag_j[cnt_diag] = fine_to_coarse[j] - my_first_cpt; P_diag_j[cnt_diag] = fine_to_coarse[j]; P_diag_data[cnt_diag++] = 1.0; } else if (max_abs_diag_offd[i] == 'o') { /* on offd part of P */ j = max_abs_cij[i]; P_offd_j[cnt_offd] = j; P_offd_data[cnt_offd++] = 1.0; } } P_diag_i[i+1] = cnt_diag; P_offd_i[i+1] = cnt_offd; } hypre_assert(cnt_diag == nnz_diag); hypre_assert(cnt_offd == nnz_offd); /* num of cols in the offd part of P */ num_cols_offd_P = 0; /* marker_offd: all -1 */ for (i = 0; i < num_cols_A_offd; i++) { marker_offd[i] = -1; } for (i = 0; i < nnz_offd; i++) { i1 = P_offd_j[i]; if (marker_offd[i1] == -1) { num_cols_offd_P++; marker_offd[i1] = 1; } } /* col_map_offd_P: the col indices of the offd of P * we first keep them be the offd-idx of A */ col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P,HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P,HYPRE_MEMORY_HOST); for (i = 0, i1 = 0; i < num_cols_A_offd; i++) { if (marker_offd[i] == 1) { tmp_map_offd[i1++] = i; } } hypre_assert(i1 == num_cols_offd_P); /* now, adjust P_offd_j to local idx w.r.t col_map_offd_R * by searching */ for (i = 0; i < nnz_offd; i++) { i1 = P_offd_j[i]; k1 = hypre_BinarySearch(tmp_map_offd, i1, num_cols_offd_P); /* search must succeed */ hypre_assert(k1 >= 0 && k1 < num_cols_offd_P); P_offd_j[i] = k1; } /* change col_map_offd_P to global coarse ids */ for (i = 0; i < num_cols_offd_P; i++) { col_map_offd_P[i] = 
fine_to_coarse_offd[tmp_map_offd[i]]; } /* Now, we should have everything of Parcsr matrix P */ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumCols(A), /* global num of rows */ total_global_cpts, /* global num of cols */ hypre_ParCSRMatrixColStarts(A), /* row_starts */ num_cpts_global, /* col_starts */ num_cols_offd_P, /* num cols offd */ nnz_diag, nnz_offd); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; /* P does not own ColStarts, since A does */ hypre_ParCSRMatrixOwnsRowStarts(P) = 0; hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; /* create CommPkg of P */ hypre_MatvecCommPkgCreate(P); *P_ptr = P; /* free workspace */ hypre_TFree(CF_marker_offd,HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd,HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd,HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data,HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse,HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd,HYPRE_MEMORY_HOST); hypre_TFree(marker_diag,HYPRE_MEMORY_HOST); hypre_TFree(marker_offd,HYPRE_MEMORY_HOST); hypre_TFree(max_abs_cij,HYPRE_MEMORY_HOST); hypre_TFree(max_abs_diag_offd,HYPRE_MEMORY_HOST); return hypre_error_flag; }
/* ===== MyMiscellany.h ===== */
/* Copyright (c) 2017, Michael Kazhdan All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the Johns Hopkins University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#ifndef MY_MISCELLANY_INCLUDED
#define MY_MISCELLANY_INCLUDED

#include "Mesh/PoissonRecon/PreProcessor.h"

//////////////////
// OpenMP Stuff //
//////////////////
#ifdef _OPENMP
#include <omp.h>
#endif // _OPENMP

////////////////
// Time Stuff //
////////////////
#include <string.h>
#include <sys/timeb.h>
#ifndef WIN32
#include <sys/time.h>
#endif // WIN32

// Returns the current wall-clock time in seconds.
// WIN32 uses _ftime (millisecond resolution); elsewhere gettimeofday (microsecond resolution).
inline double Time( void )
{
#ifdef WIN32
	struct _timeb t;
	_ftime( &t );
	return double( t.time ) + double( t.millitm ) / 1000.0;
#else // WIN32
	struct timeval t;
	gettimeofday( &t , NULL );
	return t.tv_sec + double( t.tv_usec ) / 1000000;
#endif // WIN32
}

#include <cstdio>
#include <ctime>
#include <chrono>

// Stopwatch started at construction; reports elapsed CPU (process) time and wall-clock time separately.
struct Timer
{
	Timer( void ){ _startCPUClock = std::clock() , _startWallClock = std::chrono::system_clock::now(); }
	// Elapsed CPU time in seconds since construction
	double cpuTime( void ) const{ return (std::clock() - _startCPUClock) / (double)CLOCKS_PER_SEC; };
	// Elapsed wall-clock time in seconds since construction
	double wallTime( void ) const{ std::chrono::duration<double> diff = (std::chrono::system_clock::now() - _startWallClock) ; return diff.count(); }
protected:
	std::clock_t _startCPUClock;                                            // CPU clock sampled at construction
	std::chrono::time_point< std::chrono::system_clock > _startWallClock;   // wall clock sampled at construction
};

///////////////
// I/O Stuff //
///////////////
#if defined( _WIN32 ) || defined( _WIN64 )
const char FileSeparator = '\\';
#else // !_WIN
const char FileSeparator = '/';
#endif // _WIN

#ifndef SetTempDirectory
#if defined( _WIN32 ) || defined( _WIN64 )
#define SetTempDirectory( tempDir , sz ) GetTempPath( (sz) , (tempDir) )
#else // !_WIN32 && !_WIN64
// NOTE(review): no-op when TMPDIR is unset; strcpy is unbounded, so the caller must supply
// a buffer large enough to hold the TMPDIR value -- confirm at call sites.
#define SetTempDirectory( tempDir , sz ) if( std::getenv( "TMPDIR" ) ) strcpy( tempDir , std::getenv( "TMPDIR" ) );
#endif // _WIN32 || _WIN64
#endif // !SetTempDirectory

#include <stdarg.h>
#include <vector>
#include <string>

// Routes printf-style messages to an optional append-mode log file and/or stdout.
struct MessageWriter
{
	char* outputFile;   // path of the log file to append to (NULL: no file output)
	bool echoSTDOUT;    // when true, messages are also echoed to stdout
	MessageWriter( void ){ outputFile = NULL , echoSTDOUT = true; }
	void operator() ( const char* format , ...
) { if( outputFile ) { FILE* fp = fopen( outputFile , "a" ); va_list args; va_start( args , format ); vfprintf( fp , format , args ); fclose( fp ); va_end( args ); } if( echoSTDOUT ) { va_list args; va_start( args , format ); vprintf( format , args ); va_end( args ); } } void operator() ( std::vector< char* >& messages , const char* format , ... ) { if( outputFile ) { FILE* fp = fopen( outputFile , "a" ); va_list args; va_start( args , format ); vfprintf( fp , format , args ); fclose( fp ); va_end( args ); } if( echoSTDOUT ) { va_list args; va_start( args , format ); vprintf( format , args ); va_end( args ); } // [WARNING] We are not checking the string is small enough to fit in 1024 characters messages.push_back( new char[1024] ); char* str = messages.back(); va_list args; va_start( args , format ); vsprintf( str , format , args ); va_end( args ); if( str[strlen(str)-1]=='\n' ) str[strlen(str)-1] = 0; } void operator() ( std::vector< std::string >& messages , const char* format , ... ) { if( outputFile ) { FILE* fp = fopen( outputFile , "a" ); va_list args; va_start( args , format ); vfprintf( fp , format , args ); fclose( fp ); va_end( args ); } if( echoSTDOUT ) { va_list args; va_start( args , format ); vprintf( format , args ); va_end( args ); } // [WARNING] We are not checking the string is small enough to fit in 1024 characters char message[1024]; va_list args; va_start( args , format ); vsprintf( message , format , args ); va_end( args ); if( message[strlen(message)-1]=='\n' ) message[strlen(message)-1] = 0; messages.push_back( std::string( message ) ); } }; ///////////////////////////////////// // Exception, Warnings, and Errors // ///////////////////////////////////// #include <exception> #include <string> #include <iostream> #include <sstream> #include <algorithm> namespace MKExceptions { template< typename ... Arguments > void _AddToMessageStream( std::stringstream &stream , Arguments ... 
arguments ); inline void _AddToMessageStream( std::stringstream &stream ){ return; } template< typename Argument , typename ... Arguments > void _AddToMessageStream( std::stringstream &stream , Argument argument , Arguments ... arguments ) { stream << argument; _AddToMessageStream( stream , arguments ... ); } template< typename ... Arguments > std::string MakeMessageString( std::string header , std::string fileName , int line , std::string functionName , Arguments ... arguments ) { size_t headerSize = header.size(); std::stringstream stream; // The first line is the header, the file name , and the line number stream << header << " " << fileName << " (Line " << line << ")" << std::endl; // Inset the second line by the size of the header and write the function name for( size_t i=0 ; i<=headerSize ; i++ ) stream << " "; stream << functionName << std::endl; // Inset the third line by the size of the header and write the rest for( size_t i=0 ; i<=headerSize ; i++ ) stream << " "; _AddToMessageStream( stream , arguments ... ); return stream.str(); } struct Exception : public std::exception { const char *what( void ) const noexcept { return _message.c_str(); } template< typename ... Args > Exception( const char *fileName , int line , const char *functionName , const char *format , Args ... args ) { _message = MakeMessageString( "[EXCEPTION]" , fileName , line , functionName , format , args ... ); } private: std::string _message; }; template< typename ... Args > void Throw( const char *fileName , int line , const char *functionName , const char *format , Args ... args ){ throw Exception( fileName , line , functionName , format , args ... ); } template< typename ... Args > void Warn( const char *fileName , int line , const char *functionName , const char *format , Args ... args ) { std::cerr << MakeMessageString( "[WARNING]" , fileName , line , functionName , format , args ... ) << std::endl; } template< typename ... 
Args > void ErrorOut( const char *fileName , int line , const char *functionName , const char *format , Args ... args ) { std::cerr << MakeMessageString( "[ERROR]" , fileName , line , functionName , format , args ... ) << std::endl; exit( 0 ); } } #ifndef WARN #define WARN( ... ) MKExceptions::Warn( __FILE__ , __LINE__ , __FUNCTION__ , __VA_ARGS__ ) #endif // WARN #ifndef WARN_ONCE #define WARN_ONCE( ... ) { static bool firstTime = true ; if( firstTime ) MKExceptions::Warn( __FILE__ , __LINE__ , __FUNCTION__ , __VA_ARGS__ ) ; firstTime = false; } #endif // WARN_ONCE #ifndef THROW #define THROW( ... ) MKExceptions::Throw( __FILE__ , __LINE__ , __FUNCTION__ , __VA_ARGS__ ) #endif // THROW #ifndef ERROR_OUT #define ERROR_OUT( ... ) MKExceptions::ErrorOut( __FILE__ , __LINE__ , __FUNCTION__ , __VA_ARGS__ ) #endif // ERROR_OUT #include <signal.h> #if defined(_WIN32) || defined( _WIN64 ) #else // !WINDOWS #include <execinfo.h> #include <unistd.h> #include <cxxabi.h> #include <mutex> #endif // WINDOWS struct StackTracer { static const char *exec; #if defined(_WIN32) || defined( _WIN64 ) static void Trace( void ) { } #else // !WINDOWS static void Trace( void ) { static std::mutex mutex; std::lock_guard< std::mutex > lock(mutex); // Code borrowed from: // https://stackoverflow.com/questions/77005/how-to-automatically-generate-a-stacktrace-when-my-program-crashes // and // https://stackoverflow.com/questions/15129089/is-there-a-way-to-dump-stack-trace-with-line-number-from-a-linux-release-binary/15130037 void * trace[128]; int size = backtrace( trace , 128 ); char ** messages = backtrace_symbols( trace , size ); for( int i=1 ; i< size && messages!=NULL ; ++i ) { char *mangled_name=0 , *offset_begin=0 , *offset_end=0; char syscom[1024]; sprintf( syscom , "addr2line %p -e %s" , trace[i] , exec ); //last parameter is the name of this app if( !system( syscom ) ){} // find parantheses and +address offset surrounding mangled name for( char *p=messages[i] ; *p ; ++p ) { if ( 
*p=='(' ) mangled_name = p; else if( *p=='+' ) offset_begin = p; else if( *p==')' ) { offset_end = p; break; } } // if the line could be processed, attempt to demangle the symbol if( mangled_name && offset_begin && offset_end && mangled_name<offset_begin ) { *mangled_name++ = '\0'; *offset_begin++ = '\0'; *offset_end++ = '\0'; int status; char * real_name = abi::__cxa_demangle(mangled_name, 0, 0, &status); // if demangling is successful, output the demangled function name if( !status ) { std::cerr << "\t(" << i << ") " << messages[i] << " : " << real_name << "+" << offset_begin << offset_end << std::endl; std::cout << "\t(" << i << ") " << messages[i] << " : " << real_name << "+" << offset_begin << offset_end << std::endl; } // otherwise, output the mangled function name else { std::cerr << "\t(" << i << ") " << messages[i] << " : " << mangled_name << "+" << offset_begin << offset_end << std::endl; std::cout << "\t(" << i << ") " << messages[i] << " : " << mangled_name << "+" << offset_begin << offset_end << std::endl; } free( real_name ); } // otherwise, print the whole line else { std::cerr << "\t(" << i << ") " << messages[i] << std::endl; std::cout << "\t(" << i << ") " << messages[i] << std::endl; } } free( messages ); } #endif // WINDOWS }; const char *StackTracer::exec; inline void SignalHandler( int signal ) { printf( "Signal: %d\n" , signal ); StackTracer::Trace(); exit( 0 ); }; template< typename Value > bool SetAtomic( volatile Value *value , Value newValue , Value oldValue ); template< typename Data > void AddAtomic( Data& a , Data b ); //////////////////// // MKThread Stuff // //////////////////// #include <thread> #include <mutex> #include <vector> #include <atomic> #include <condition_variable> #include <functional> #include <chrono> #include <future> #include <memory> struct ThreadPool { enum ParallelType { #ifdef _OPENMP OPEN_MP , #endif // _OPENMP THREAD_POOL , ASYNC , NONE }; static const std::vector< std::string > ParallelNames; enum 
ScheduleType { STATIC , DYNAMIC }; static const std::vector< std::string > ScheduleNames; static size_t DefaultChunkSize; static ScheduleType DefaultSchedule; template< typename ... Functions > static void ParallelSections( const Functions & ... functions ) { std::vector< std::future< void > > futures( sizeof...(Functions) ); _ParallelSections( &futures[0] , functions ... ); for( size_t t=0 ; t<futures.size() ; t++ ) futures[t].get(); } static void Parallel_for( size_t begin , size_t end , const std::function< void ( unsigned int , size_t ) > &iterationFunction , ScheduleType schedule=DefaultSchedule , size_t chunkSize=DefaultChunkSize ) { if( begin>=end ) return; size_t range = end - begin; size_t chunks = ( range + chunkSize - 1 ) / chunkSize; unsigned int threads = (unsigned int)NumThreads(); std::atomic< size_t > index; index.store( 0 ); if( range<chunkSize || _ParallelType==NONE || threads==1 ) { for( size_t i=begin ; i<end ; i++ ) iterationFunction( 0 , i ); return; } auto _ChunkFunction = [ &iterationFunction , begin , end , chunkSize ]( unsigned int thread , size_t chunk ) { const size_t _begin = begin + chunkSize*chunk; const size_t _end = std::min< size_t >( end , _begin+chunkSize ); for( size_t i=_begin ; i<_end ; i++ ) iterationFunction( thread , i ); }; auto _StaticThreadFunction = [ &_ChunkFunction , chunks , threads ]( unsigned int thread ) { for( size_t chunk=thread ; chunk<chunks ; chunk+=threads ) _ChunkFunction( thread , chunk ); }; auto _DynamicThreadFunction = [ &_ChunkFunction , chunks , &index ]( unsigned int thread ) { size_t chunk; while( ( chunk=index.fetch_add(1) )<chunks ) _ChunkFunction( thread , chunk ); }; if ( schedule==STATIC ) _ThreadFunction = _StaticThreadFunction; else if( schedule==DYNAMIC ) _ThreadFunction = _DynamicThreadFunction; if( false ){} #ifdef _OPENMP else if( _ParallelType==OPEN_MP ) { if( schedule==STATIC ) #pragma omp parallel for num_threads( threads ) schedule( static , 1 ) for( int c=0 ; c<chunks ; c++ ) 
_ChunkFunction( omp_get_thread_num() , c ); else if( schedule==DYNAMIC ) #pragma omp parallel for num_threads( threads ) schedule( dynamic , 1 ) for( int c=0 ; c<chunks ; c++ ) _ChunkFunction( omp_get_thread_num() , c ); } #endif // _OPENMP else if( _ParallelType==ASYNC ) { static std::vector< std::future< void > > futures; futures.resize( threads-1 ); for( unsigned int t=1 ; t<threads ; t++ ) futures[t-1] = std::async( std::launch::async , _ThreadFunction , t ); _ThreadFunction( 0 ); for( unsigned int t=1 ; t<threads ; t++ ) futures[t-1].get(); } else if( _ParallelType==THREAD_POOL ) { unsigned int targetTasks = 0; if( !SetAtomic( &_RemainingTasks , threads-1 , targetTasks ) ) { WARN( "nested for loop, reverting to serial" ); for( size_t i=begin ; i<end ; i++ ) iterationFunction( 0 , i ); } else { _WaitingForWorkOrClose.notify_all(); { std::unique_lock< std::mutex > lock( _Mutex ); _DoneWithWork.wait( lock , [&]( void ){ return _RemainingTasks==0; } ); } } } } static unsigned int NumThreads( void ){ return (unsigned int)_Threads.size()+1; } static void Init( ParallelType parallelType , unsigned int numThreads=std::thread::hardware_concurrency() ) { _ParallelType = parallelType; if( _Threads.size() && !_Close ) { _Close = true; _WaitingForWorkOrClose.notify_all(); for( unsigned int t=0 ; t<_Threads.size() ; t++ ) _Threads[t].join(); } _Close = true; numThreads--; _Threads.resize( numThreads ); if( _ParallelType==THREAD_POOL ) { _RemainingTasks = 0; _Close = false; for( unsigned int t=0 ; t<numThreads ; t++ ) _Threads[t] = std::thread( _ThreadInitFunction , t ); } } static void Terminate( void ) { if( _Threads.size() && !_Close ) { _Close = true; _WaitingForWorkOrClose.notify_all(); for( unsigned int t=0 ; t<_Threads.size() ; t++ ) _Threads[t].join(); _Threads.resize( 0 ); } } private: ThreadPool( const ThreadPool & ){} ThreadPool &operator = ( const ThreadPool & ){ return *this; } template< typename Function > static void _ParallelSections( std::future< void > 
*futures , const Function &function ){ *futures = std::async( std::launch::async , function ); } template< typename Function , typename ... Functions > static void _ParallelSections( std::future< void > *futures , const Function &function , const Functions& ... functions ) { *futures = std::async( std::launch::async , function ); _ParallelSections( futures+1 , functions ... ); } static void _ThreadInitFunction( unsigned int thread ) { // Wait for the first job to come in std::unique_lock< std::mutex > lock( _Mutex ); _WaitingForWorkOrClose.wait( lock ); while( !_Close ) { lock.unlock(); // do the job _ThreadFunction( thread ); // Notify and wait for the next job lock.lock(); _RemainingTasks--; if( !_RemainingTasks ) _DoneWithWork.notify_all(); _WaitingForWorkOrClose.wait( lock ); } } static bool _Close; static volatile unsigned int _RemainingTasks; static std::mutex _Mutex; static std::condition_variable _WaitingForWorkOrClose , _DoneWithWork; static std::vector< std::thread > _Threads; static std::function< void ( unsigned int ) > _ThreadFunction; static ParallelType _ParallelType; }; size_t ThreadPool::DefaultChunkSize = 128; ThreadPool::ScheduleType ThreadPool::DefaultSchedule = ThreadPool::DYNAMIC; bool ThreadPool::_Close; volatile unsigned int ThreadPool::_RemainingTasks; std::mutex ThreadPool::_Mutex; std::condition_variable ThreadPool::_WaitingForWorkOrClose; std::condition_variable ThreadPool::_DoneWithWork; std::vector< std::thread > ThreadPool::_Threads; std::function< void ( unsigned int ) > ThreadPool::_ThreadFunction; ThreadPool::ParallelType ThreadPool::_ParallelType; const std::vector< std::string >ThreadPool::ParallelNames = { #ifdef _OPENMP "open mp" , #endif // _OPENMP "thread pool" , "async" , "none" }; const std::vector< std::string >ThreadPool::ScheduleNames = { "static" , "dynamic" }; #include <mutex> #if defined( _WIN32 ) || defined( _WIN64 ) #include <windows.h> #endif // _WIN32 || _WIN64 template< typename Value > bool SetAtomic32( volatile 
Value *value , Value newValue , Value oldValue )
{
#if defined( _WIN32 ) || defined( _WIN64 )
	long &_oldValue = *(long *)&oldValue;
	long &_newValue = *(long *)&newValue;
	return InterlockedCompareExchange( (long*)value , _newValue , _oldValue )==_oldValue;
#else // !_WIN32 && !_WIN64
	uint32_t &_oldValue = *(uint32_t *)&oldValue;
	uint32_t &_newValue = *(uint32_t *)&newValue;
//	return __sync_bool_compare_and_swap( (uint32_t *)value , _oldValue , _newValue );
	return __atomic_compare_exchange_n( (uint32_t *)value , (uint32_t *)&oldValue , _newValue , false , __ATOMIC_SEQ_CST , __ATOMIC_SEQ_CST );
#endif // _WIN32 || _WIN64
}

// 64-bit compare-and-swap: if *value bit-wise equals oldValue, store newValue and return true.
// NOTE(review): type-puns Value through a 64-bit integer, so it assumes sizeof(Value)==8 and
// compares bit patterns (relevant for doubles: -0.0 != +0.0 here) -- confirm callers only pass 8-byte types.
template< typename Value > bool SetAtomic64( volatile Value *value , Value newValue , Value oldValue )
{
#if defined( _WIN32 ) || defined( _WIN64 )
	__int64 &_oldValue = *(__int64 *)&oldValue;
	__int64 &_newValue = *(__int64 *)&newValue;
	return InterlockedCompareExchange64( (__int64*)value , _newValue , _oldValue )==_oldValue;
#else // !_WIN32 && !_WIN64
	uint64_t &_oldValue = *(uint64_t *)&oldValue;
	uint64_t &_newValue = *(uint64_t *)&newValue;
//	return __sync_bool_compare_and_swap ( (uint64_t *)&value , _oldValue , _newValue );
	// on failure __atomic_compare_exchange_n writes the observed value back into oldValue (a local copy, discarded)
	return __atomic_compare_exchange_n( (uint64_t *)value , (uint64_t *)&oldValue , _newValue , false , __ATOMIC_SEQ_CST , __ATOMIC_SEQ_CST );
#endif // _WIN32 || _WIN64
}

// Atomically performs a += b for 4-byte types via a CAS retry loop:
// re-read a, recompute the sum, and retry until the compare-exchange succeeds.
// NOTE(review): the re-reads of a inside the loop are plain (non-atomic) loads -- verify this is
// acceptable on the targeted platforms.
template< typename Number > void AddAtomic32( Number &a , Number b )
{
#if 0
	Number current = a;
	Number sum = current+b;
	while( !SetAtomic32( &a , sum , current ) ) current = a , sum = a+b;
#else
#if defined( _WIN32 ) || defined( _WIN64 )
	Number current = a;
	Number sum = current+b;
	long &_current = *(long *)&current;
	long &_sum = *(long *)&sum;
	while( InterlockedCompareExchange( (long*)&a , _sum , _current )!=_current ) current = a , sum = a+b;
#else // !_WIN32 && !_WIN64
	Number current = a;
	Number sum = current+b;
	uint32_t &_current = *(uint32_t *)&current;
	uint32_t &_sum = *(uint32_t *)&sum;
	while( __sync_val_compare_and_swap( (uint32_t *)&a , _current ,
_sum )!=_current ) current = a , sum = a+b;
#endif // _WIN32 || _WIN64
#endif
}

// Atomically performs a += b for 8-byte types (64-bit analog of AddAtomic32):
// CAS retry loop built on SetAtomic64; re-reads a and recomputes the sum until the swap succeeds.
template< typename Number > void AddAtomic64( Number &a , Number b )
{
#if 1
	Number current = a;
	Number sum = current+b;
	while( !SetAtomic64( &a , sum , current ) ) current = a , sum = a+b;
#else
#if defined( _WIN32 ) || defined( _WIN64 )
	Number current = a;
	Number sum = current+b;
	__int64 &_current = *(__int64 *)&current;
	__int64 &_sum = *(__int64 *)&sum;
	while( InterlockedCompareExchange64( (__int64*)&a , _sum , _current )!=_current ) current = a , sum = a+b;
#else // !_WIN32 && !_WIN64
	Number current = a;
	Number sum = current+b;
	uint64_t &_current = *(uint64_t *)&current;
	uint64_t &_sum = *(uint64_t *)&sum;
	while( __sync_val_compare_and_swap( (uint64_t *)&a , _current , _sum )!=_current ) current = a , sum = a+b;
#endif // _WIN32 || _WIN64
#endif
}

// Size-dispatching compare-and-swap: routes to the 32-/64-bit implementations by sizeof(Value);
// any other size falls back to a mutex-guarded compare-and-store (with a one-time warning).
template< typename Value > bool SetAtomic( volatile Value *value , Value newValue , Value oldValue )
{
	switch( sizeof(Value) )
	{
	case 4: return SetAtomic32( value , newValue , oldValue );
	case 8: return SetAtomic64( value , newValue , oldValue );
	default:
		WARN_ONCE( "should not use this function: " , sizeof(Value) );
		// slow path: serialize all odd-sized CAS attempts through one mutex
		static std::mutex setAtomicMutex;
		std::lock_guard< std::mutex > lock( setAtomicMutex );
		if( *value==oldValue ){ *value = newValue ; return true; }
		else return false;
	}
}

// Size-dispatching atomic add: routes to the 32-/64-bit implementations by sizeof(Data);
// any other size falls back to a mutex-guarded += (with a one-time warning).
template< typename Data > void AddAtomic( Data& a , Data b )
{
	switch( sizeof(Data) )
	{
	case 4: return AddAtomic32( a , b );
	case 8: return AddAtomic64( a , b );
	default:
		WARN_ONCE( "should not use this function: " , sizeof(Data) );
		// slow path: serialize all odd-sized adds through one mutex
		static std::mutex addAtomicMutex;
		std::lock_guard< std::mutex > lock( addAtomicMutex );
		a += b;
	}
}

/////////////////////////
// NumberWrapper Stuff //
/////////////////////////
#include <vector>

// Empty tag type used as the default discriminator for NumberWrapper.
struct EmptyNumberWrapperClass{};

// Strongly-typed wrapper around an arithmetic type; Type and I discriminate otherwise
// identical wrappers so they cannot be mixed accidentally.
template< typename Number , typename Type=EmptyNumberWrapperClass , size_t I=0 >
struct NumberWrapper
{
	typedef Number type;
	Number n;   // the wrapped value
	NumberWrapper( Number _n=0 ) : n(_n){}
	NumberWrapper operator + (
NumberWrapper _n ) const { return NumberWrapper( n + _n.n ); } NumberWrapper operator - ( NumberWrapper _n ) const { return NumberWrapper( n - _n.n ); } NumberWrapper operator * ( NumberWrapper _n ) const { return NumberWrapper( n * _n.n ); } NumberWrapper operator / ( NumberWrapper _n ) const { return NumberWrapper( n / _n.n ); } NumberWrapper &operator += ( NumberWrapper _n ){ n += _n.n ; return *this; } NumberWrapper &operator -= ( NumberWrapper _n ){ n -= _n.n ; return *this; } NumberWrapper &operator *= ( NumberWrapper _n ){ n *= _n.n ; return *this; } NumberWrapper &operator /= ( NumberWrapper _n ){ n /= _n.n ; return *this; } bool operator == ( NumberWrapper _n ) const { return n==_n.n; } bool operator != ( NumberWrapper _n ) const { return n!=_n.n; } bool operator < ( NumberWrapper _n ) const { return n<_n.n; } bool operator > ( NumberWrapper _n ) const { return n>_n.n; } bool operator <= ( NumberWrapper _n ) const { return n<=_n.n; } bool operator >= ( NumberWrapper _n ) const { return n>=_n.n; } NumberWrapper operator ++ ( int ) { NumberWrapper _n(n) ; n++ ; return _n; } NumberWrapper operator -- ( int ) { NumberWrapper _n(n) ; n-- ; return _n; } NumberWrapper &operator ++ ( void ) { n++ ; return *this; } NumberWrapper &operator -- ( void ) { n-- ; return *this; } explicit operator Number () const { return n; } }; #if 0 template< typename Number , typename Type , size_t I > struct std::atomic< NumberWrapper< Number , Type , I > > { typedef Number type; std::atomic< Number > n; atomic( Number _n=0 ) : n(_n){} atomic( const std::atomic< Number > &_n ) : n(_n){} atomic( NumberWrapper< Number , Type , I > _n ) : n(_n.n){} atomic &operator = ( Number _n ){ n = _n ; return *this; } // atomic &operator = ( const atomic &a ){ n = a.n ; return *this; } // atomic &operator = ( const NumberWrapper< Number , Type , I > &_n ){ n = _n.n ; return *this; } atomic operator + ( atomic _n ) const { return atomic( n + _n.n ); } atomic operator - ( atomic _n ) const { return 
atomic( n * _n.n ); } atomic operator * ( atomic _n ) const { return atomic( n * _n.n ); } atomic operator / ( atomic _n ) const { return atomic( n / _n.n ); } atomic &operator += ( atomic _n ){ n += _n.n ; return *this; } atomic &operator -= ( atomic _n ){ n -= _n.n ; return *this; } atomic &operator *= ( atomic _n ){ n *= _n.n ; return *this; } atomic &operator /= ( atomic _n ){ n /= _n.n ; return *this; } bool operator == ( atomic _n ) const { return n==_n.n; } bool operator != ( atomic _n ) const { return n!=_n.n; } bool operator < ( atomic _n ) const { return n<_n.n; } bool operator > ( atomic _n ) const { return n>_n.n; } bool operator <= ( atomic _n ) const { return n<=_n.n; } bool operator >= ( atomic _n ) const { return n>=_n.n; } atomic operator ++ ( int ) { atomic _n(n) ; n++ ; return _n; } atomic operator -- ( int ) { atomic _n(n) ; n-- ; return _n; } atomic &operator ++ ( void ) { n++ ; return *this; } atomic &operator -- ( void ) { n-- ; return *this; } operator NumberWrapper< Number , Type , I >() const { return NumberWrapper< Number , Type , I >(n); } explicit operator Number () const { return n; } }; #endif namespace std { template< typename Number , typename Type , size_t I > struct hash< NumberWrapper< Number , Type , I > > { size_t operator()( NumberWrapper< Number , Type , I > n ) const { return std::hash< Number >{}( n.n ); } }; } template< typename Data , typename _NumberWrapper > struct VectorWrapper : public std::vector< Data > { VectorWrapper( void ){} VectorWrapper( size_t sz ) : std::vector< Data >( sz ){} VectorWrapper( size_t sz , Data d ) : std::vector< Data >( sz , d ){} // void resize( _NumberWrapper n ) { std::vector< Data >::resize( (size_t)(_NumberWrapper::type)n ); } // void resize( _NumberWrapper n , Data d ){ std::vector< Data >::resize( (size_t)(_NumberWrapper::type)n , d ); } typename std::vector< Data >::reference operator[]( _NumberWrapper n ){ return std::vector< Data >::operator[]( n.n ); } typename std::vector< Data 
>::const_reference operator[]( _NumberWrapper n ) const { return std::vector< Data >::operator[]( n.n ); } }; ////////////////// // Memory Stuff // ////////////////// size_t getPeakRSS( void ); size_t getCurrentRSS( void ); struct MemoryInfo { static size_t Usage( void ){ return getCurrentRSS(); } static int PeakMemoryUsageMB( void ){ return (int)( getPeakRSS()>>20 ); } }; #if defined( _WIN32 ) || defined( _WIN64 ) #include <Windows.h> #include <Psapi.h> inline void SetPeakMemoryMB( size_t sz ) { sz <<= 20; SIZE_T peakMemory = sz; HANDLE h = CreateJobObject( NULL , NULL ); AssignProcessToJobObject( h , GetCurrentProcess() ); JOBOBJECT_EXTENDED_LIMIT_INFORMATION jeli = { 0 }; jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_JOB_MEMORY; jeli.JobMemoryLimit = peakMemory; if( !SetInformationJobObject( h , JobObjectExtendedLimitInformation , &jeli , sizeof( jeli ) ) ) WARN( "Failed to set memory limit" ); } #else // !_WIN32 && !_WIN64 #include <sys/time.h> #include <sys/resource.h> inline void SetPeakMemoryMB( size_t sz ) { sz <<= 20; struct rlimit rl; getrlimit( RLIMIT_AS , &rl ); rl.rlim_cur = sz; setrlimit( RLIMIT_AS , &rl ); } #endif // _WIN32 || _WIN64 /* * Author: David Robert Nadeau * Site: http://NadeauSoftware.com/ * License: Creative Commons Attribution 3.0 Unported License * http://creativecommons.org/licenses/by/3.0/deed.en_US */ #if defined(_WIN32) || defined( _WIN64 ) #include <windows.h> #include <psapi.h> #elif defined(__unix__) || defined(__unix) || defined(unix) || (defined(__APPLE__) && defined(__MACH__)) #include <unistd.h> #include <sys/resource.h> #if defined(__APPLE__) && defined(__MACH__) #include <mach/mach.h> #elif (defined(_AIX) || defined(__TOS__AIX__)) || (defined(__sun__) || defined(__sun) || defined(sun) && (defined(__SVR4) || defined(__svr4__))) #include <fcntl.h> #include <procfs.h> #elif defined(__linux__) || defined(__linux) || defined(linux) || defined(__gnu_linux__) #include <stdio.h> #endif #else #error "Cannot define 
getPeakRSS( ) or getCurrentRSS( ) for an unknown OS."
#endif

/**
 * Returns the peak (maximum so far) resident set size (physical
 * memory use) measured in bytes, or zero if the value cannot be
 * determined on this OS.
 */
inline size_t getPeakRSS( )
{
#if defined(_WIN32)
	/* Windows -------------------------------------------------- */
	/* _WIN32 is also defined by 64-bit Windows compilers, so this
	 * branch covers Win64 as well. */
	PROCESS_MEMORY_COUNTERS info;
	GetProcessMemoryInfo( GetCurrentProcess( ), &info, sizeof(info) );
	return (size_t)info.PeakWorkingSetSize;

#elif (defined(_AIX) || defined(__TOS__AIX__)) || (defined(__sun__) || defined(__sun) || defined(sun) && (defined(__SVR4) || defined(__svr4__)))
	/* AIX and Solaris ------------------------------------------ */
	/* NOTE(review): in this condition `&&` binds tighter than `||`, so the
	 * SVR4 check only pairs with the bare `sun` spelling — this matches the
	 * upstream Nadeau code verbatim; confirm intent before changing. */
	struct psinfo psinfo;
	int fd = -1;
	if ( (fd = open( "/proc/self/psinfo", O_RDONLY )) == -1 )
		return (size_t)0L;		/* Can't open? */
	if ( read( fd, &psinfo, sizeof(psinfo) ) != sizeof(psinfo) )
	{
		close( fd );
		return (size_t)0L;		/* Can't read? */
	}
	close( fd );
	/* pr_rssize is reported in kilobytes. */
	return (size_t)(psinfo.pr_rssize * 1024L);

#elif defined(__unix__) || defined(__unix) || defined(unix) || (defined(__APPLE__) && defined(__MACH__))
	/* BSD, Linux, and OSX -------------------------------------- */
	struct rusage rusage;
	getrusage( RUSAGE_SELF, &rusage );
#if defined(__APPLE__) && defined(__MACH__)
	/* macOS reports ru_maxrss in bytes... */
	return (size_t)rusage.ru_maxrss;
#else
	/* ...while Linux/BSD report it in kilobytes. */
	return (size_t)(rusage.ru_maxrss * 1024L);
#endif

#else
	/* Unknown OS ----------------------------------------------- */
	return (size_t)0L;			/* Unsupported. */
#endif
}

/**
 * Returns the current resident set size (physical memory use) measured
 * in bytes, or zero if the value cannot be determined on this OS.
*/
inline size_t getCurrentRSS( )
{
#if defined(_WIN32) || defined( _WIN64 )
	/* Windows -------------------------------------------------- */
	PROCESS_MEMORY_COUNTERS info;
	GetProcessMemoryInfo( GetCurrentProcess( ), &info, sizeof(info) );
	return (size_t)info.WorkingSetSize;

#elif defined(__APPLE__) && defined(__MACH__)
	/* OSX ------------------------------------------------------ */
	struct mach_task_basic_info info;
	mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT;
	if ( task_info( mach_task_self( ), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount ) != KERN_SUCCESS )
		return (size_t)0L;		/* Can't access? */
	return (size_t)info.resident_size;

#elif defined(__linux__) || defined(__linux) || defined(linux) || defined(__gnu_linux__)
	/* Linux ---------------------------------------------------- */
	/* /proc/self/statm: first field is total program size, second is
	 * resident pages; "%*s" skips the first, "%ld" reads the second. */
	long rss = 0L;
	FILE* fp = NULL;
	if ( (fp = fopen( "/proc/self/statm", "r" )) == NULL )
		return (size_t)0L;		/* Can't open? */
	if ( fscanf( fp, "%*s%ld", &rss ) != 1 )
	{
		fclose( fp );
		return (size_t)0L;		/* Can't read? */
	}
	fclose( fp );
	/* statm counts pages; convert to bytes. */
	return (size_t)rss * (size_t)sysconf( _SC_PAGESIZE);

#else
	/* AIX, BSD, Solaris, and Unknown OS ------------------------ */
	return (size_t)0L;			/* Unsupported. */
#endif
}

#endif // MY_MISCELLANY_INCLUDED
8990.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute schedule(dynamic, 1) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
FillInLinearSystemImpl.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ---------------------------------------------------------------------------- #include "open3d/core/linalg/kernel/SVD3x3.h" #include "open3d/t/geometry/kernel/GeometryIndexer.h" #include "open3d/t/pipelines/kernel/FillInLinearSystem.h" namespace open3d { namespace t { namespace pipelines { namespace kernel { #if defined(__CUDACC__) void FillInRigidAlignmentTermCUDA #else void FillInRigidAlignmentTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &Ti_ps, const core::Tensor &Tj_qs, const core::Tensor &Ri_normal_ps, int i, int j, float threshold) { core::Device device = AtA.GetDevice(); int64_t n = Ti_ps.GetLength(); if (Tj_qs.GetLength() != n || Ri_normal_ps.GetLength() != n) { utility::LogError( "Unable to setup linear system: input length mismatch."); } // First fill in a small 12 x 12 linear system core::Tensor AtA_local = core::Tensor::Zeros({12, 12}, core::Dtype::Float32, device); core::Tensor Atb_local = core::Tensor::Zeros({12}, core::Dtype::Float32, device); float *AtA_local_ptr = static_cast<float *>(AtA_local.GetDataPtr()); float *Atb_local_ptr = static_cast<float *>(Atb_local.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); const float *Ti_ps_ptr = static_cast<const float *>(Ti_ps.GetDataPtr()); const float *Tj_qs_ptr = static_cast<const float *>(Tj_qs.GetDataPtr()); const float *Ri_normal_ps_ptr = static_cast<const float *>(Ri_normal_ps.GetDataPtr()); #if defined(__CUDACC__) namespace launcher = core::kernel::cuda_launcher; #else namespace launcher = core::kernel::cpu_launcher; #endif launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { const float *p_prime = Ti_ps_ptr + 3 * workload_idx; const float *q_prime = Tj_qs_ptr + 3 * workload_idx; const float *normal_p_prime = Ri_normal_ps_ptr + 3 * workload_idx; float r = (p_prime[0] - q_prime[0]) * normal_p_prime[0] + (p_prime[1] - q_prime[1]) * normal_p_prime[1] + (p_prime[2] - q_prime[2]) * normal_p_prime[2]; if 
(abs(r) > threshold) return; float J_ij[12]; J_ij[0] = -q_prime[2] * normal_p_prime[1] + q_prime[1] * normal_p_prime[2]; J_ij[1] = q_prime[2] * normal_p_prime[0] - q_prime[0] * normal_p_prime[2]; J_ij[2] = -q_prime[1] * normal_p_prime[0] + q_prime[0] * normal_p_prime[1]; J_ij[3] = normal_p_prime[0]; J_ij[4] = normal_p_prime[1]; J_ij[5] = normal_p_prime[2]; for (int k = 0; k < 6; ++k) { J_ij[k + 6] = -J_ij[k]; } // Not optimized; Switch to reduction if necessary. #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) for (int i_local = 0; i_local < 12; ++i_local) { for (int j_local = 0; j_local < 12; ++j_local) { atomicAdd(&AtA_local_ptr[i_local * 12 + j_local], J_ij[i_local] * J_ij[j_local]); } atomicAdd(&Atb_local_ptr[i_local], J_ij[i_local] * r); } atomicAdd(residual_ptr, r * r); #else #pragma omp critical(FillInRigidAlignmentTermCPU) { for (int i_local = 0; i_local < 12; ++i_local) { for (int j_local = 0; j_local < 12; ++j_local) { AtA_local_ptr[i_local * 12 + j_local] += J_ij[i_local] * J_ij[j_local]; } Atb_local_ptr[i_local] += J_ij[i_local] * r; } *residual_ptr += r * r; } #endif }); // Then fill-in the large linear system std::vector<int64_t> indices_vec(12); for (int k = 0; k < 6; ++k) { indices_vec[k] = i * 6 + k; indices_vec[k + 6] = j * 6 + k; } std::vector<int64_t> indices_i_vec; std::vector<int64_t> indices_j_vec; for (int local_i = 0; local_i < 12; ++local_i) { for (int local_j = 0; local_j < 12; ++local_j) { indices_i_vec.push_back(indices_vec[local_i]); indices_j_vec.push_back(indices_vec[local_j]); } } core::Tensor indices(indices_vec, {12}, core::Dtype::Int64, device); core::Tensor indices_i(indices_i_vec, {12 * 12}, core::Dtype::Int64, device); core::Tensor indices_j(indices_j_vec, {12 * 12}, core::Dtype::Int64, device); core::Tensor AtA_sub = AtA.IndexGet({indices_i, indices_j}); AtA.IndexSet({indices_i, indices_j}, AtA_sub + AtA_local.View({12 * 12})); core::Tensor Atb_sub = Atb.IndexGet({indices}); Atb.IndexSet({indices}, Atb_sub + 
Atb_local.View({12, 1})); } #if defined(__CUDACC__) void FillInSLACAlignmentTermCUDA #else void FillInSLACAlignmentTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &Ti_Cps, const core::Tensor &Tj_Cqs, const core::Tensor &Cnormal_ps, const core::Tensor &Ri_Cnormal_ps, const core::Tensor &RjT_Ri_Cnormal_ps, const core::Tensor &cgrid_idx_ps, const core::Tensor &cgrid_idx_qs, const core::Tensor &cgrid_ratio_qs, const core::Tensor &cgrid_ratio_ps, int i, int j, int n_frags, float threshold) { int64_t n = Ti_Cps.GetLength(); if (Tj_Cqs.GetLength() != n || Cnormal_ps.GetLength() != n || Ri_Cnormal_ps.GetLength() != n || RjT_Ri_Cnormal_ps.GetLength() != n || cgrid_idx_ps.GetLength() != n || cgrid_ratio_ps.GetLength() != n || cgrid_idx_qs.GetLength() != n || cgrid_ratio_qs.GetLength() != n) { utility::LogError( "Unable to setup linear system: input length mismatch."); } int n_vars = Atb.GetLength(); float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr()); float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); // Geometric properties const float *Ti_Cps_ptr = static_cast<const float *>(Ti_Cps.GetDataPtr()); const float *Tj_Cqs_ptr = static_cast<const float *>(Tj_Cqs.GetDataPtr()); const float *Cnormal_ps_ptr = static_cast<const float *>(Cnormal_ps.GetDataPtr()); const float *Ri_Cnormal_ps_ptr = static_cast<const float *>(Ri_Cnormal_ps.GetDataPtr()); const float *RjT_Ri_Cnormal_ps_ptr = static_cast<const float *>(RjT_Ri_Cnormal_ps.GetDataPtr()); // Association properties const int *cgrid_idx_ps_ptr = static_cast<const int *>(cgrid_idx_ps.GetDataPtr()); const int *cgrid_idx_qs_ptr = static_cast<const int *>(cgrid_idx_qs.GetDataPtr()); const float *cgrid_ratio_ps_ptr = static_cast<const float *>(cgrid_ratio_ps.GetDataPtr()); const float *cgrid_ratio_qs_ptr = static_cast<const float *>(cgrid_ratio_qs.GetDataPtr()); #if defined(__CUDACC__) namespace launcher = 
core::kernel::cuda_launcher; #else namespace launcher = core::kernel::cpu_launcher; #endif launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { const float *Ti_Cp = Ti_Cps_ptr + 3 * workload_idx; const float *Tj_Cq = Tj_Cqs_ptr + 3 * workload_idx; const float *Cnormal_p = Cnormal_ps_ptr + 3 * workload_idx; const float *Ri_Cnormal_p = Ri_Cnormal_ps_ptr + 3 * workload_idx; const float *RjTRi_Cnormal_p = RjT_Ri_Cnormal_ps_ptr + 3 * workload_idx; const int *cgrid_idx_p = cgrid_idx_ps_ptr + 8 * workload_idx; const int *cgrid_idx_q = cgrid_idx_qs_ptr + 8 * workload_idx; const float *cgrid_ratio_p = cgrid_ratio_ps_ptr + 8 * workload_idx; const float *cgrid_ratio_q = cgrid_ratio_qs_ptr + 8 * workload_idx; float r = (Ti_Cp[0] - Tj_Cq[0]) * Ri_Cnormal_p[0] + (Ti_Cp[1] - Tj_Cq[1]) * Ri_Cnormal_p[1] + (Ti_Cp[2] - Tj_Cq[2]) * Ri_Cnormal_p[2]; if (abs(r) > threshold) return; // Now we fill in a 60 x 60 sub-matrix: 2 x (6 + 8 x 3) float J[60]; int idx[60]; // Jacobian w.r.t. Ti: 0-6 J[0] = -Tj_Cq[2] * Ri_Cnormal_p[1] + Tj_Cq[1] * Ri_Cnormal_p[2]; J[1] = Tj_Cq[2] * Ri_Cnormal_p[0] - Tj_Cq[0] * Ri_Cnormal_p[2]; J[2] = -Tj_Cq[1] * Ri_Cnormal_p[0] + Tj_Cq[0] * Ri_Cnormal_p[1]; J[3] = Ri_Cnormal_p[0]; J[4] = Ri_Cnormal_p[1]; J[5] = Ri_Cnormal_p[2]; // Jacobian w.r.t. Tj: 6-12 for (int k = 0; k < 6; ++k) { J[k + 6] = -J[k]; idx[k + 0] = 6 * i + k; idx[k + 6] = 6 * j + k; } // Jacobian w.r.t. C over p: 12-36 for (int k = 0; k < 8; ++k) { J[12 + k * 3 + 0] = cgrid_ratio_p[k] * Cnormal_p[0]; J[12 + k * 3 + 1] = cgrid_ratio_p[k] * Cnormal_p[1]; J[12 + k * 3 + 2] = cgrid_ratio_p[k] * Cnormal_p[2]; idx[12 + k * 3 + 0] = 6 * n_frags + cgrid_idx_p[k] * 3 + 0; idx[12 + k * 3 + 1] = 6 * n_frags + cgrid_idx_p[k] * 3 + 1; idx[12 + k * 3 + 2] = 6 * n_frags + cgrid_idx_p[k] * 3 + 2; } // Jacobian w.r.t. 
C over q: 36-60 for (int k = 0; k < 8; ++k) { J[36 + k * 3 + 0] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[0]; J[36 + k * 3 + 1] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[1]; J[36 + k * 3 + 2] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[2]; idx[36 + k * 3 + 0] = 6 * n_frags + cgrid_idx_q[k] * 3 + 0; idx[36 + k * 3 + 1] = 6 * n_frags + cgrid_idx_q[k] * 3 + 1; idx[36 + k * 3 + 2] = 6 * n_frags + cgrid_idx_q[k] * 3 + 2; } // Not optimized; Switch to reduction if necessary. #if defined(__CUDACC__) for (int ki = 0; ki < 60; ++ki) { for (int kj = 0; kj < 60; ++kj) { float AtA_ij = J[ki] * J[kj]; int ij = idx[ki] * n_vars + idx[kj]; atomicAdd(AtA_ptr + ij, AtA_ij); } float Atb_i = J[ki] * r; atomicAdd(Atb_ptr + idx[ki], Atb_i); } atomicAdd(residual_ptr, r * r); #else #pragma omp critical(FillInSLACAlignmentTermCPU) { for (int ki = 0; ki < 60; ++ki) { for (int kj = 0; kj < 60; ++kj) { AtA_ptr[idx[ki] * n_vars + idx[kj]] += J[ki] * J[kj]; } Atb_ptr[idx[ki]] += J[ki] * r; } *residual_ptr += r * r; } #endif }); } #if defined(__CUDACC__) void FillInSLACRegularizerTermCUDA #else void FillInSLACRegularizerTermCPU #endif (core::Tensor &AtA, core::Tensor &Atb, core::Tensor &residual, const core::Tensor &grid_idx, const core::Tensor &grid_nbs_idx, const core::Tensor &grid_nbs_mask, const core::Tensor &positions_init, const core::Tensor &positions_curr, float weight, int n_frags, int anchor_idx) { int64_t n = grid_idx.GetLength(); int64_t n_vars = Atb.GetLength(); float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr()); float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr()); float *residual_ptr = static_cast<float *>(residual.GetDataPtr()); const int *grid_idx_ptr = static_cast<const int *>(grid_idx.GetDataPtr()); const int *grid_nbs_idx_ptr = static_cast<const int *>(grid_nbs_idx.GetDataPtr()); const bool *grid_nbs_mask_ptr = static_cast<const bool *>(grid_nbs_mask.GetDataPtr()); const float *positions_init_ptr = static_cast<const float *>(positions_init.GetDataPtr()); const float 
*positions_curr_ptr = static_cast<const float *>(positions_curr.GetDataPtr()); #if defined(__CUDACC__) namespace launcher = core::kernel::cuda_launcher; #else namespace launcher = core::kernel::cpu_launcher; #endif launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t workload_idx) { // Enumerate 6 neighbors int idx_i = grid_idx_ptr[workload_idx]; const int *idx_nbs = grid_nbs_idx_ptr + 6 * workload_idx; const bool *mask_nbs = grid_nbs_mask_ptr + 6 * workload_idx; // Build a 3x3 linear system to compute the local R float cov[3][3] = {{0}}; float U[3][3], V[3][3], S[3]; int cnt = 0; for (int k = 0; k < 6; ++k) { bool mask_k = mask_nbs[k]; if (!mask_k) continue; int idx_k = idx_nbs[k]; // Now build linear systems float diff_ik_init[3] = {positions_init_ptr[idx_i * 3 + 0] - positions_init_ptr[idx_k * 3 + 0], positions_init_ptr[idx_i * 3 + 1] - positions_init_ptr[idx_k * 3 + 1], positions_init_ptr[idx_i * 3 + 2] - positions_init_ptr[idx_k * 3 + 2]}; float diff_ik_curr[3] = {positions_curr_ptr[idx_i * 3 + 0] - positions_curr_ptr[idx_k * 3 + 0], positions_curr_ptr[idx_i * 3 + 1] - positions_curr_ptr[idx_k * 3 + 1], positions_curr_ptr[idx_i * 3 + 2] - positions_curr_ptr[idx_k * 3 + 2]}; // Build linear system by computing XY^T when formulating Y = RX // Y: curr // X: init for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { cov[i][j] += diff_ik_init[i] * diff_ik_curr[j]; } } ++cnt; } if (cnt < 3) { return; } core::linalg::kernel::svd3x3(*cov, *U, S, *V); float R[3][3]; core::linalg::kernel::transpose3x3_(*U); core::linalg::kernel::matmul3x3_3x3(*V, *U, *R); float d = core::linalg::kernel::det3x3(*R); if (d < 0) { U[2][0] = -U[2][0]; U[2][1] = -U[2][1]; U[2][2] = -U[2][2]; core::linalg::kernel::matmul3x3_3x3(*V, *U, *R); } // Now we have R, we build Hessian and residuals // But first, we need to anchor a point if (idx_i == anchor_idx) { R[0][0] = R[1][1] = R[2][2] = 1; R[0][1] = R[0][2] = R[1][0] = R[1][2] = R[2][0] = R[2][1] = 0; } for (int k = 0; k < 6; ++k) { bool 
mask_k = mask_nbs[k]; if (mask_k) { int idx_k = idx_nbs[k]; float diff_ik_init[3] = { positions_init_ptr[idx_i * 3 + 0] - positions_init_ptr[idx_k * 3 + 0], positions_init_ptr[idx_i * 3 + 1] - positions_init_ptr[idx_k * 3 + 1], positions_init_ptr[idx_i * 3 + 2] - positions_init_ptr[idx_k * 3 + 2]}; float diff_ik_curr[3] = { positions_curr_ptr[idx_i * 3 + 0] - positions_curr_ptr[idx_k * 3 + 0], positions_curr_ptr[idx_i * 3 + 1] - positions_curr_ptr[idx_k * 3 + 1], positions_curr_ptr[idx_i * 3 + 2] - positions_curr_ptr[idx_k * 3 + 2]}; float R_diff_ik_curr[3]; core::linalg::kernel::matmul3x3_3x1(*R, diff_ik_init, R_diff_ik_curr); float local_r[3]; local_r[0] = diff_ik_curr[0] - R_diff_ik_curr[0]; local_r[1] = diff_ik_curr[1] - R_diff_ik_curr[1]; local_r[2] = diff_ik_curr[2] - R_diff_ik_curr[2]; int offset_idx_i = 3 * idx_i + 6 * n_frags; int offset_idx_k = 3 * idx_k + 6 * n_frags; #if defined(__CUDACC__) // Update residual atomicAdd(residual_ptr, weight * (local_r[0] * local_r[0] + local_r[1] * local_r[1] + local_r[2] * local_r[2])); for (int axis = 0; axis < 3; ++axis) { // Update AtA: 2x2 atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_i + axis], weight); atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_k + axis], weight); atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_k + axis], -weight); atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_i + axis], -weight); // Update Atb: 2x1 atomicAdd(&Atb_ptr[offset_idx_i + axis], +weight * local_r[axis]); atomicAdd(&Atb_ptr[offset_idx_k + axis], -weight * local_r[axis]); } #else #pragma omp critical(FillInSLACRegularizerTermCPU) { // Update residual *residual_ptr += weight * (local_r[0] * local_r[0] + local_r[1] * local_r[1] + local_r[2] * local_r[2]); for (int axis = 0; axis < 3; ++axis) { // Update AtA: 2x2 AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_i + axis] += weight; AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_k + axis] += weight; 
AtA_ptr[(offset_idx_i + axis) * n_vars + offset_idx_k + axis] -= weight; AtA_ptr[(offset_idx_k + axis) * n_vars + offset_idx_i + axis] -= weight; // Update Atb: 2x1 Atb_ptr[offset_idx_i + axis] += weight * local_r[axis]; Atb_ptr[offset_idx_k + axis] -= weight * local_r[axis]; } } #endif } } }); } } // namespace kernel } // namespace pipelines } // namespace t } // namespace open3d
GB_binop__bshift_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_01__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__bshift_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint32) // C=scalar+B GB (_bind1st__bshift_uint32) // C=scalar+B' GB (_bind1st_tran__bshift_uint32) // C=A+scalar GB (_bind2nd__bshift_uint32) // C=A'+scalar GB (_bind2nd_tran__bshift_uint32) // C type: uint32_t // A type: uint32_t // B,b type: int8_t // BinaryOp: cij = GB_bitshift_uint32 (aij, bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true 
if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_bitshift_uint32 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_UINT32 || GxB_NO_BSHIFT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bshift_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bshift_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bshift_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const 
int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bshift_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bshift_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_bitshift_uint32 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bshift_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_bitshift_uint32 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_uint32 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__bshift_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_uint32 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__bshift_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
particle_levelset_utilities.h
/* ============================================================================== KratosTestApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2010 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice sKRATOS_WATCH(disp);hall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: rrossi $ // Date: $Date: 2007-03-06 10:30:31 $ // Revision: $Revision: 1.2 $ // // #if !defined(KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED ) #define KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes #include "pybind11/pybind11.h" // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/node.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "thermo_mechanical_application.h" #include "spatial_containers/spatial_containers.h" #include "utilities/timer.h" #include "processes/node_erase_process.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/timer.h" // #include <boost/random/linear_congruential.hpp> // #include <boost/random/uniform_int.hpp> // #include <boost/random/uniform_real.hpp> // #include <boost/random/variate_generator.hpp> // #include <boost/generator_iterator.hpp> // #include <tr1/random> #include <time.h> #ifdef _OPENMP #include "omp.h" #endif namespace Kratos { template<std::size_t TDim> class ParticleLevelSetUtils { public: KRATOS_CLASS_POINTER_DEFINITION(ParticleLevelSetUtils<TDim>); //********************************************************************************************** //********************************************************************************************** //function to seed a list of new nodes void Seed(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size) { KRATOS_TRY; rLagrangianModelPart.Nodes().clear(); unsigned int ele_id = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { 
el_it->SetId(ele_id); ele_id++; } if(TDim==2){ BoundedMatrix<double, 16, 3 > pos; BoundedMatrix<double, 16, 3 > N; CreateParticles2D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); } else { // BoundedMatrix<double, 56, 3 > pos; // BoundedMatrix<double, 56, 4 > N; // CreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); BoundedMatrix<double, 10, 3 > pos; BoundedMatrix<double, 10, 4 > N; FewCreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size); } for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { node_it->FastGetSolutionStepValue(VELOCITY, 1) = node_it->FastGetSolutionStepValue(VELOCITY); // node_it->FastGetSolutionStepValue(DISTANCE, 1) = node_it->FastGetSolutionStepValue(DISTANCE); } KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void StreamlineMove(const double dt, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY array_1d<double, 3 > veulerian; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //KRATOS_WATCH("551") #pragma omp parallel for firstprivate(results,N,veulerian) for (int i = 0; i < nparticles; i++) { unsigned int substep = 0; unsigned int subdivisions = 1; double small_dt = dt; while(substep++ < subdivisions) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; (iparticle)->Set(TO_ERASE, true); Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = 
results.begin(); Element::Pointer pelement; // KRATOS_WATCH("561") bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); // KRATOS_WATCH("564") if (is_found == true) { (pparticle)->GetValue(IS_VISITED) = 1; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY); //compute adaptive subdivisions if(substep == 1) { //compute h double h = N[0] * geom[0].FastGetSolutionStepValue(NODAL_H); for (unsigned int k = 1; k < geom.size(); k++) h += N[k] * geom[k].FastGetSolutionStepValue(NODAL_H); //compute number of subdivisions needed const unsigned int min_subdivisions = 3; const unsigned int max_subdivisions = 20; double v = norm_2(veulerian); subdivisions = double(floor(2*dt*v/h)); subdivisions = (subdivisions<min_subdivisions) ? min_subdivisions : (subdivisions>max_subdivisions) ? 
max_subdivisions : subdivisions; //compute subdivisions time step small_dt = dt / subdivisions; // KRATOS_WATCH(subdivisions) } //move according to the streamline array_1d<double, 3 > & disp = (iparticle)->FastGetSolutionStepValue(DISPLACEMENT); noalias(disp) += small_dt*veulerian; (pparticle)->Set(TO_ERASE, false); // KRATOS_WATCH("585") //update position noalias(iparticle->Coordinates()) = iparticle->GetInitialPosition(); noalias(iparticle->Coordinates()) += iparticle->FastGetSolutionStepValue(DISPLACEMENT); (iparticle)->GetValue(IS_VISITED) = 0; //KRATOS_WATCH("619") } } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ParticleLevelSetCorrection(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY //Initilize NAGATIVE_DISTANCE & POSETIVE_DISTANCE const int nnodes= rEulerianModelPart.Nodes().size(); #pragma omp parallel for for (int jj = 0; jj < nnodes; jj++) { ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj; const double nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE); node_itr->SetValue(POSETIVE_DISTANCE,nd_dist ); node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist ); } //loop over particles double particle_dist= 0.0; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,particle_dist) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer p_pointer = *(particle_itr.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = 
node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //interpolate the particle distance particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE); for (unsigned int k = 1; k < geom.size(); k++) particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE); //check if correction is needed const double p_sign = particle_itr->FastGetSolutionStepValue(IS_WATER); const double p_radi = particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS); if( particle_dist*p_sign < 0.0 && fabs(particle_dist) > p_radi) { double p_xx = particle_itr->X(); double p_yy = particle_itr->Y(); double p_zz = particle_itr->Z(); // const Variable<double> posetive_negative_dist_var; /* if( p_sign == -1.0 ) posetive_negative_dist_var = NAGATIVE_DISTANCE; else if( p_sign == 1.0 ) posetive_negative_dist_var = POSETIVE_DISTANCE; */ for (unsigned int kk = 1; kk < geom.size(); kk++){ p_xx -= geom[kk].X(); p_yy -= geom[kk].Y(); p_zz -= geom[kk].Z(); double dd = p_xx*p_xx + p_yy*p_yy + p_zz*p_zz; dd = sqrt(dd); double dist_to_particle = p_sign * (p_radi - dd); //correction due to particle distance and sign geom[kk].SetLock(); if( p_sign == 1.0){ double& pos_distance = geom[kk].GetValue(POSETIVE_DISTANCE); if ( dist_to_particle > pos_distance) pos_distance = dist_to_particle;} else if( p_sign == -1.0){ double& neg_distance = geom[kk].GetValue(NAGATIVE_DISTANCE); if ( dist_to_particle < neg_distance) neg_distance = dist_to_particle; } geom[kk].UnSetLock(); } } } }//end of loop over particles //final correction, choose between NAGATIVE_DISTANCE & POSETIVE_DISTANCE // const int nnodes= rEulerianModelPart.Nodes().size(); #pragma omp parallel for for (int jj = 0; jj < nnodes; jj++) { ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj; double posetive = node_itr->GetValue(POSETIVE_DISTANCE); double negative = 
node_itr->GetValue(NAGATIVE_DISTANCE); double & nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE); if ( posetive != negative){ if( fabs(posetive) < fabs(negative) ) nd_dist = posetive; else nd_dist = negative; node_itr->SetValue(POSETIVE_DISTANCE,nd_dist ); node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist ); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ResetParticleRadius(const double min_edge_length, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator) { KRATOS_TRY; double particle_dist = 0.0; Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N,particle_dist) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer p_pointer = *(particle_itr.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //interpolate the particle distance particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE); for (unsigned int k = 1; k < geom.size(); k++) particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE); if( fabs(particle_dist) < 0.1*min_edge_length) particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_length; else if(fabs(particle_dist) > 0.5*min_edge_length) particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_length; else 
particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = fabs(particle_dist); } } KRATOS_CATCH("") } //********************************************************************************************** //********************************************************************************************** void ParticleReseeding(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator, const double max_seed_distance, const double min_edge_size) { KRATOS_TRY; //generate a tree with the position of the lagrangian nodes // typedef Node < 3 > PointType; // typedef Node < 3 > ::Pointer PointTypePointer; //unsigned int min_number_of_particles = 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { el_it->SetValue(YOUNG_MODULUS,0.0); } for (ModelPart::NodesContainerType::iterator pparticle = rLagrangianModelPart.NodesBegin(); pparticle != rLagrangianModelPart.NodesEnd(); pparticle++) { pparticle->Set(TO_ERASE,false); pparticle->SetValue(NL_ITERATION_NUMBER,(rEulerianModelPart.ElementsBegin())->Id()); pparticle->SetValue(IS_ESCAPED,false); pparticle->SetValue(IS_VISITED,0); } //count particles that fall within an element Vector N; const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rLagrangianModelPart.Nodes().size(); //count particles within an element #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i; Node < 3 > ::Pointer pparticle = *(iparticle.base()); typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { const double 
particle_sign = iparticle->FastGetSolutionStepValue(IS_WATER); Geometry< Node < 3 > >& geom = pelement->GetGeometry(); bool is_scaped = CheckIfEscaped(geom,N,particle_sign); iparticle->SetValue(IS_ESCAPED,is_scaped); if( CheckElemDist(geom,max_seed_distance) )// if it is inside the 3h band { double& counter = pelement->GetValue(YOUNG_MODULUS); #pragma omp atomic counter += 1.0; iparticle->SetValue(NL_ITERATION_NUMBER , pelement->Id()); } else { if( is_scaped == false) //delete if it is not an escaped particle iparticle->Set(TO_ERASE,true); } } } //loop over close to the surface elements to ressed or delet particles if(TDim==2){ ReseedOrDelete2D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size); } else { const int max_num_ptr = 16;//70; const int num_ptr = 10;//56; const int min_num_ptr = 6;//40; MarkEraseExtraParticles3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, max_num_ptr, num_ptr); ReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr ); FewReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr ); } //perform the erase NodeEraseProcess(rLagrangianModelPart).Execute(); KRATOS_CATCH(""); } //********************************************************************************************** //********************************************************************************************** void VisualizationModelPart(ModelPart& rCompleteModelPart, ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart) { KRATOS_TRY; rCompleteModelPart.Elements() = rEulerianModelPart.Elements(); rCompleteModelPart.Nodes() = rEulerianModelPart.Nodes(); unsigned int id; if(rEulerianModelPart.Nodes().size()!= 0) id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; else id = 1; //preallocate the memory needed int tot_nodes = rEulerianModelPart.Nodes().size() + rLagrangianModelPart.Nodes().size(); 
rCompleteModelPart.Nodes().reserve( tot_nodes ); //note that here we renumber the nodes for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin(); node_it != rLagrangianModelPart.NodesEnd(); node_it++) { rCompleteModelPart.AddNode(*(node_it.base())); node_it->SetId(id++); } KRATOS_CATCH(""); } //********************************************************************************** //********************************************************************************** void FindMaxMinEdgeSize(ModelPart& r_model_part, pybind11::list& maxmin) { KRATOS_TRY double max_edge = 0.0; double min_edge = 1000.0; for(ModelPart::ElementsContainerType::iterator it=r_model_part.ElementsBegin(); it!=r_model_part.ElementsEnd(); it++) { Geometry<Node<3> >&geom = it->GetGeometry(); double loc_h_max = 0.0; double loc_h_min = 1000.0; for(unsigned int i=0; i<TDim+1; i++) { double xc = geom[i].X(); double yc = geom[i].Y(); double zc = geom[i].Z(); for(unsigned int j=i+1; j<TDim+1; j++) { double x = geom[j].X(); double y = geom[j].Y(); double z = geom[j].Z(); double l = (x - xc)*(x - xc); l += (y - yc)*(y - yc); l += (z - zc)*(z - zc); if (l > loc_h_max) loc_h_max = l; else if(l < loc_h_min) loc_h_min = l; } } loc_h_max = sqrt(loc_h_max); loc_h_min = sqrt(loc_h_min); if(loc_h_max > max_edge ) max_edge = loc_h_max; if(loc_h_min < min_edge ) min_edge = loc_h_min; } // r_model_part.GetCommunicator().MaxAll(h_max); maxmin.append(max_edge); maxmin.append(min_edge); KRATOS_CATCH(""); } private: void CreateParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 56, 3 > pos, BoundedMatrix<double, 56, 4 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); 
if(CheckElemDist(geom,max_seed_distance)) { ComputeGaussPointPositions3D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void FewCreateParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 10, 3 > pos, BoundedMatrix<double, 10, 4 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { FewComputeGaussPointPositions3D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * 
geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void CreateParticles2D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BoundedMatrix<double, 16, 3 > pos, BoundedMatrix<double, 16, 3 > N, const double max_seed_distance, const double min_edge_size) { unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1; for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin(); el_it != rEulerianModelPart.ElementsEnd(); el_it++) { Geometry<Node < 3 > >& geom = el_it->GetGeometry(); if(CheckElemDist(geom,max_seed_distance)) { ComputeGaussPointPositions2D(geom, pos, N); for (unsigned int i = 0; i < pos.size1(); i++) { int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); } } } } void ReseedOrDelete2D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); 
const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 16, 3 > coord; BoundedMatrix<double, 16, 3 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < 12 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); ComputeGaussPointPositions2D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr<16 ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } else if( n_ptr > 20 && CheckElemDist(geom,max_seed_distance) ){ const int ele_id = ielem->Id(); ModelPart::NodesContainerType element_particles; element_particles.reserve(64); //save particle list for (int kk = 0; kk < nparticles; kk++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); if( ptr_nest==ele_id ) { iparticle->SetValue(SCALE, 0); element_particles.push_back( *(iparticle.base()) ); } } 
//loop to order based on the radius ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin(); unsigned int ptr_elem_size = element_particles.size(); for(unsigned int ii=0; ii < ptr_elem_size; ii++) for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++) { double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS); double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS); (ii_radi>=jj_radi) ? (ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1; } //delete extra nodes int aux_ptr_elem_size = int(ptr_elem_size); while(aux_ptr_elem_size>16) { for(unsigned int ii=0; ii < ptr_elem_size; ii++){ bool swt = false; for( int kkk = ptr_elem_size; kkk>0; kkk-- ) if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){ bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED); if( is_escaped==false ) (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES (ptr_begin + ii)->SetValue(IS_VISITED,1); swt = true; break; } if(swt ) break; } aux_ptr_elem_size -= 1; } } } } void MarkEraseExtraParticles3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int max_num_particle, const int num_particle) { // int id; // if (rLagrangianModelPart.Nodes().size() != 0) // id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); // else // id = 1; const int nelements = rEulerianModelPart.Elements().size(); const int nparticles = rLagrangianModelPart.Nodes().size(); std::vector< GlobalPointersVector< Node< 3> > > particle_of_element(nelements); // particle_of_element.reserve(nelements); std::vector< unsigned int > num_ptr_in_elem(nelements,0); // num_ptr_in_elem.reserve(nelements); //loop on elements to resrve the size of particle in element list #pragma omp parallel for firstprivate(num_ptr_in_elem) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = 
rEulerianModelPart.ElementsBegin() + ne; int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); unsigned int ele_id = ielem->Id(); num_ptr_in_elem[ele_id-1] = n_ptr; if(n_ptr > max_num_particle) particle_of_element[ele_id-1].reserve(n_ptr); } //loop on particles to push_back particle related to full elements for (int kk = 0; kk < nparticles; kk++) { ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk; const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); if( num_ptr_in_elem[ptr_nest-1] > static_cast<unsigned int>(max_num_particle) ) particle_of_element[ptr_nest-1].push_back( *(iparticle.base()) ); } //loop over elements to reoreder the particle radius in over populated elements #pragma omp parallel for firstprivate(particle_of_element) for( int ii = 0; ii< static_cast<int>(particle_of_element.size()); ++ii) { if(particle_of_element[ii].size() > static_cast<unsigned int>(max_num_particle)) { //sort std::sort(particle_of_element[ii].ptr_begin(), particle_of_element[ii].ptr_end(), RadiusCompare() ); //delete extra nodes GlobalPointersVector< Node< 3> >::iterator ele_pt_ptr = particle_of_element[ii].begin(); const unsigned int this_ele_ptr = particle_of_element[ii].size(); int aux_ptr_elem_size = this_ele_ptr; for( unsigned int ij = 0; (ij < this_ele_ptr && aux_ptr_elem_size > num_particle); ++ij) { bool is_escaped = (ele_pt_ptr + ij)->GetValue(IS_ESCAPED); if( is_escaped==false ){ (ele_pt_ptr + ij)->Set(TO_ERASE,true); aux_ptr_elem_size--; } } } } } struct RadiusCompare{ template<class TRefrenceType> bool operator()(const TRefrenceType ptr_a, const TRefrenceType ptr_b) { double a_radi = ptr_a.get()->FastGetSolutionStepValue(PARTICLE_RADIUS); double b_radi = ptr_b.get()->FastGetSolutionStepValue(PARTICLE_RADIUS); return (a_radi > b_radi); } }; void ReseedPoorElements3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int min_num_particle, const int 
num_particle) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 56, 3 > coord; BoundedMatrix<double, 56, 4 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); ComputeGaussPointPositions3D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr < num_particle ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } } } void FewReseedPoorElements3D(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size, const int min_num_particle, const int num_particle) { int id; if (rLagrangianModelPart.Nodes().size() != 0) id = 
(rLagrangianModelPart.NodesEnd() - 1)->Id(); else id = 1; const int nelements = rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); BoundedMatrix<double, 10, 3 > coord; BoundedMatrix<double, 10, 4 > NN; // #pragma omp parallel for firstprivate(NN,coord) for (int ne = 0; ne < nelements; ne++) { ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; Geometry<Node < 3 > >& geom = ielem->GetGeometry(); int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element { //compute cooordinates //RandomPariclePosition(geom, coord, NN); FewComputeGaussPointPositions3D(geom, coord, NN); int aux_n_ptr = n_ptr; int cnt = 0; while( aux_n_ptr < num_particle ){ aux_n_ptr++; //COORDINATES int node_id = id++; Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); noalias(vel) = ZeroVector(3); // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); double p_distance = 0.0; for (unsigned int j = 0; j < TDim + 1; j++){ noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); } // Assign particle sign if(p_distance < 0.0) pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; else if(p_distance > 0.0) pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; pnode->Fix(IS_WATER); AssignParticleRadius(pnode,p_distance,min_edge_size); cnt++; } } } } // void ReseedOrDelete3D(ModelPart& rEulerianModelPart, // ModelPart& rLagrangianModelPart, // const double max_seed_distance, // const double min_edge_size) // { // int id; // if (rLagrangianModelPart.Nodes().size() != 0) // id = (rLagrangianModelPart.NodesEnd() - 1)->Id(); // else // id = 1; // const int nelements = 
rEulerianModelPart.Elements().size(); // const int nparticles = rLagrangianModelPart.Nodes().size(); // // // // BoundedMatrix<double, 56, 3 > coord; // BoundedMatrix<double, 56, 4 > NN; // // #pragma omp parallel for firstprivate(NN,coord) // for (int ne = 0; ne < nelements; ne++) // { // ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne; // Geometry<Node < 3 > >& geom = ielem->GetGeometry(); // int n_ptr = int(ielem->GetValue(YOUNG_MODULUS)); // // if( n_ptr < 42 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element // { // //compute cooordinates // //RandomPariclePosition(geom, coord, NN); // ComputeGaussPointPositions3D(geom, coord, NN); // int aux_n_ptr = n_ptr; // int cnt = 0; // while( aux_n_ptr<56 ){ // aux_n_ptr++; // //COORDINATES // int node_id = id++; // Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2)); // // array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY); // noalias(vel) = ZeroVector(3); // // // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE); // double p_distance = 0.0; // for (unsigned int j = 0; j < TDim + 1; j++){ // noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY); // p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE); // } // // // Assign particle sign // if(p_distance < 0.0) // pnode->FastGetSolutionStepValue(IS_WATER)=-1.0; // else if(p_distance > 0.0) // pnode->FastGetSolutionStepValue(IS_WATER)= 1.0; // // pnode->Fix(IS_WATER); // // AssignParticleRadius(pnode,p_distance,min_edge_size); // // cnt++; // } // } // else if( n_ptr > 70 && CheckElemDist(geom,max_seed_distance) ){ // const int ele_id = ielem->Id(); // ModelPart::NodesContainerType element_particles; // element_particles.reserve(64); // //save particle list // for (int kk = 0; kk < nparticles; kk++) // { // ModelPart::NodesContainerType::iterator iparticle = 
rLagrangianModelPart.NodesBegin() + kk; // // const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER); // if( ptr_nest==ele_id ) // { // iparticle->SetValue(SCALE, 0); // element_particles.push_back( *(iparticle.base()) ); // } // } // // //loop to order based on the radius // ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin(); // unsigned int ptr_elem_size = element_particles.size(); // // for(unsigned int ii=0; ii < ptr_elem_size; ii++) // for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++) // { // double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS); // double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS); // // (ii_radi>=jj_radi) ? (ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1; // // } // //delete extra nodes // int aux_ptr_elem_size = int(ptr_elem_size); // while(aux_ptr_elem_size>56) // { // for(unsigned int ii=0; ii < ptr_elem_size; ii++){ // bool swt = false; // for( int kkk = ptr_elem_size; kkk>0; kkk-- ) // if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){ // bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED); // if( is_escaped==false ) // (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES // (ptr_begin + ii)->SetValue(IS_VISITED,1); // swt = true; // break; // } // if(swt ) // break; // } // aux_ptr_elem_size -= 1; // } // } // // // } // // } void ComputeGaussPointPositions2D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & pos, BoundedMatrix<double, 16, 3 > & N) { //lower diagonal terms double ypos = 1.0 / 5.0; int pos_counter = 0; for (unsigned int i = 0; i < 4; i++) { double xpos = 1.0 / 8.0; for (unsigned int j = 0; j < (7-2*i); j++) { double N1 = xpos; double N2 = ypos; double N3 = 1.0 - xpos - ypos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y(); 
pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; xpos += 1.0 / 8.0; pos_counter += 1; } ypos += 1.0 / 5.0; } } void ComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 56, 3 > & pos, BoundedMatrix<double, 56, 4 > & N) { int pos_counter = 0; const double one_seventh = 1.0/6.5; double close_point = 1.0/20; double zpos = close_point; for (unsigned int kk = 0; kk < 6; kk++) { // double y_div = 1.0/(7 - kk); double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh for (unsigned int i = 0; i < (6-kk); i++) { // double x_div = 1.0/(7 - kk);// -i double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh for (unsigned int j = 0; j < (6-kk-i); j++) { double N1 = xpos; double N2 = ypos; double N3 = zpos; double N4 = 1.0 - xpos - ypos - zpos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; N(pos_counter, 3) = N4; xpos += one_seventh;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh pos_counter += 1; } ypos += one_seventh;//y_div * (1.0 - zpos);//one_seventh } zpos += one_seventh; } } void FewComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 10, 3 > & pos, BoundedMatrix<double, 10, 4 > & N) { int pos_counter = 0; const double one_third = 1.0/2.5; double close_point = 1.0/20; double zpos = close_point; for (unsigned int kk = 0; kk < 3; kk++) { // double y_div = 1.0/(7 - kk); double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh for (unsigned int i = 0; i < (3-kk); i++) { // double x_div = 1.0/(7 - kk);// -i double xpos = 
close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh for (unsigned int j = 0; j < (3-kk-i); j++) { double N1 = xpos; double N2 = ypos; double N3 = zpos; double N4 = 1.0 - xpos - ypos - zpos; pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X(); pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y(); pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z(); N(pos_counter, 0) = N1; N(pos_counter, 1) = N2; N(pos_counter, 2) = N3; N(pos_counter, 3) = N4; xpos += one_third;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh pos_counter += 1; } ypos += one_third;//y_div * (1.0 - zpos);//one_seventh } zpos += one_third; } } void RandomPariclePosition(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & coord, BoundedMatrix<double, 16, 3 > & N_shape) { for(int ii=0;ii<16;ii++){ double xi = rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ); double etta = (1.0 - xi) * ( rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ) ); double zetta = 1.0 - (xi + etta); coord(ii,0) = xi * geom[0].X() + etta * geom[1].X() + zetta * geom[2].X(); coord(ii,1) = xi * geom[0].Y() + etta * geom[1].Y() + zetta * geom[2].Y(); coord(ii,2) = xi * geom[0].Z() + etta * geom[1].Z() + zetta * geom[2].Z(); N_shape(ii,0) = xi; N_shape(ii,1) = etta; N_shape(ii,1) = zetta; } } static int CheckElemDist(Geometry< Node < 3 > >& geom, const double max_dist) { for(unsigned int ii=0; ii < geom.size(); ++ii) { double nd_dist = geom[ii].FastGetSolutionStepValue(DISTANCE); if (fabs(nd_dist) < max_dist) return 1; } return 0; } bool CheckIfEscaped(Geometry< Node < 3 > >& geom, const array_1d<double, 3 > & N_shape,const double particle_sign) { double dist = N_shape[0]*geom[0].FastGetSolutionStepValue(DISTANCE); for(unsigned int ii=1; ii < geom.size(); ++ii) dist += N_shape[ii]*geom[ii].FastGetSolutionStepValue(DISTANCE); if( dist*particle_sign < 0.0) return true; else return false; } void 
// Clamp the Lagrangian particle radius stored in PARTICLE_RADIUS based on the
// particle's (interpolated) signed distance to the interface:
//   |p_dist| <  0.1*min_edge_size  ->  radius = 0.1*min_edge_size  (floor)
//   |p_dist| >  0.5*min_edge_size  ->  radius = 0.5*min_edge_size  (cap)
//   otherwise                      ->  radius = |p_dist|
// NOTE(review): p_dist is taken by non-const reference but is never written
// here — presumably it could be passed by value; confirm against callers.
// The commented-out time_seed() below and the closing braces end the class,
// the namespace, and the include guard of this header.
AssignParticleRadius(Node < 3 > ::Pointer nd_ptr, double& p_dist,const double min_edge_size) { if( fabs(p_dist) < 0.1*min_edge_size) nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_size; else if(fabs(p_dist) > 0.5*min_edge_size) nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_size; else nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = fabs(p_dist); } // unsigned int time_seed() // { // time_t now = time ( 0 ); // unsigned char *p = (unsigned char *)&now; // unsigned int seed = 0; // size_t i; // // for ( i = 0; i < sizeof now; i++ ) // seed = seed * ( UCHAR_MAX + 2U ) + p[i]; // // return seed; // } }; } #endif // KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED defined
vector.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.14 $
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * Member functions for hypre_Vector class.
 *
 *****************************************************************************/

#include "seq_mv.h"
#include <assert.h>

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCreate
 *
 * Allocate a vector header for `size` entries.  No data storage is
 * allocated here; call hypre_SeqVectorInitialize() for that.  The vector
 * owns its (future) data by default.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
   hypre_Vector *vector;

   vector = hypre_CTAlloc(hypre_Vector, 1);

   hypre_VectorData(vector) = NULL;
   hypre_VectorSize(vector) = size;

   hypre_VectorNumVectors(vector) = 1;
   hypre_VectorMultiVecStorageMethod(vector) = 0;

   /* set defaults */
   hypre_VectorOwnsData(vector) = 1;

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultiVectorCreate
 *
 * Same as hypre_SeqVectorCreate, but for `num_vectors` vectors sharing one
 * contiguous data block.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   hypre_Vector *vector = hypre_SeqVectorCreate(size);
   hypre_VectorNumVectors(vector) = num_vectors;
   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorDestroy
 *
 * Free the vector header and, if the vector owns it, the data array.
 * Safe to call with NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
   HYPRE_Int ierr = 0;

   if (vector)
   {
      if ( hypre_VectorOwnsData(vector) )
      {
         hypre_TFree(hypre_VectorData(vector));
      }
      hypre_TFree(vector);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInitialize
 *
 * Allocate the data array (zero-initialized, if not already set) and set
 * the multivector strides:
 *   method 0: vectors are stored contiguously (vecstride=size, idxstride=1)
 *   method 1: vectors are interleaved       (vecstride=1, idxstride=num)
 * Returns nonzero for an unknown storage method.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
   HYPRE_Int size = hypre_VectorSize(vector);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_vectors = hypre_VectorNumVectors(vector);
   HYPRE_Int multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector);

   if ( ! hypre_VectorData(vector) )
      hypre_VectorData(vector) = hypre_CTAlloc(double, num_vectors*size);

   if ( multivec_storage_method == 0 )
   {
      hypre_VectorVectorStride(vector) = size;
      hypre_VectorIndexStride(vector) = 1;
   }
   else if ( multivec_storage_method == 1 )
   {
      hypre_VectorVectorStride(vector) = 1;
      hypre_VectorIndexStride(vector) = num_vectors;
   }
   else
      ++ierr;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetDataOwner
 *
 * Declare whether the vector owns (and must free) its data array.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
                             HYPRE_Int     owns_data )
{
   HYPRE_Int ierr = 0;

   hypre_VectorOwnsData(vector) = owns_data;

   return ierr;
}

/*--------------------------------------------------------------------------
 * ReadVector
 *
 * Read a vector from an ASCII file: first the size, then one value per
 * entry.  Returns NULL when the file cannot be opened (fix: the previous
 * version dereferenced the NULL stream).
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
   hypre_Vector *vector;

   FILE    *fp;

   double  *data;
   HYPRE_Int      size;

   HYPRE_Int      j;

   /*----------------------------------------------------------
    * Read in the data
    *----------------------------------------------------------*/

   fp = fopen(file_name, "r");
   if (fp == NULL)
      return NULL;   /* file missing or unreadable */

   hypre_fscanf(fp, "%d", &size);

   vector = hypre_SeqVectorCreate(size);
   hypre_SeqVectorInitialize(vector);

   data = hypre_VectorData(vector);
   for (j = 0; j < size; j++)
   {
      hypre_fscanf(fp, "%le", &data[j]);
   }

   fclose(fp);

   /* multivector code not written yet >>> */
   hypre_assert( hypre_VectorNumVectors(vector) == 1 );

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorPrint
 *
 * Write the vector to an ASCII file in the format hypre_SeqVectorRead
 * expects.  Multivectors print each component vector in turn, honoring the
 * storage strides.  Returns nonzero when the file cannot be opened (fix:
 * the previous version dereferenced the NULL stream).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
                      char         *file_name )
{
   FILE    *fp;

   double  *data;
   HYPRE_Int      size, num_vectors, vecstride, idxstride;

   HYPRE_Int      i, j;

   HYPRE_Int      ierr = 0;

   num_vectors = hypre_VectorNumVectors(vector);
   vecstride = hypre_VectorVectorStride(vector);
   idxstride = hypre_VectorIndexStride(vector);

   /*----------------------------------------------------------
    * Print in the data
    *----------------------------------------------------------*/

   data = hypre_VectorData(vector);
   size = hypre_VectorSize(vector);

   fp = fopen(file_name, "w");
   if (fp == NULL)
      return ++ierr;   /* cannot create/overwrite the output file */

   if ( hypre_VectorNumVectors(vector) == 1 )
   {
      hypre_fprintf(fp, "%d\n", size);
   }
   else
   {
      hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
   }

   if ( num_vectors > 1 )
   {
      for ( j = 0; j < num_vectors; ++j )
      {
         hypre_fprintf(fp, "vector %d\n", j );
         for (i = 0; i < size; i++)
         {
            hypre_fprintf(fp, "%.14e\n", data[ j*vecstride + i*idxstride ] );
         }
      }
   }
   else
   {
      for (i = 0; i < size; i++)
      {
         hypre_fprintf(fp, "%.14e\n", data[i]);
      }
   }

   fclose(fp);

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetConstantValues
 *
 * Set every entry (of every component vector) to `value`.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
                                  double        value )
{
   double  *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);

   HYPRE_Int      i;

   HYPRE_Int      ierr  = 0;

   size *= hypre_VectorNumVectors(v);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
      vector_data[i] = value;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetRandomValues
 *
 * returns vector of values randomly distributed between -1.0 and +1.0
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
                                HYPRE_Int     seed )
{
   double  *vector_data = hypre_VectorData(v);
   HYPRE_Int      size        = hypre_VectorSize(v);

   HYPRE_Int      i;

   HYPRE_Int      ierr  = 0;
   hypre_SeedRand(seed);

   size *= hypre_VectorNumVectors(v);

   /* RDF: threading this loop may cause problems because of hypre_Rand() */
   for (i = 0; i < size; i++)
      vector_data[i] = 2.0 * hypre_Rand() - 1.0;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCopy
 * copies data from x to y
 * y should have already been initialized at the same size as x
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
                     hypre_Vector *y )
{
   double  *x_data = hypre_VectorData(x);
   double  *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);

   HYPRE_Int      i;

   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(x);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
      y_data[i] = x_data[i];

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneDeep
 * Returns a complete copy of x - a deep copy, with its own copy of the data.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      num_vectors   = hypre_VectorNumVectors(x);
   hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );

   hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);

   hypre_SeqVectorInitialize(y);
   hypre_SeqVectorCopy( x, y );

   return y;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCloneShallow
 * Returns a complete copy of x - a shallow copy, pointing the data of x
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      num_vectors   = hypre_VectorNumVectors(x);
   hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );

   hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);

   hypre_VectorData(y) = hypre_VectorData(x);
   hypre_SeqVectorSetDataOwner( y, 0 );
   /* data is already set, so Initialize only (re)computes the strides */
   hypre_SeqVectorInitialize(y);

   return y;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorScale
 *
 * y := alpha * y (all component vectors).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorScale( double        alpha,
                      hypre_Vector *y )
{
   double  *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(y);

   HYPRE_Int      i;

   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(y);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
      y_data[i] *= alpha;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorAxpy
 *
 * y := alpha * x + y (all component vectors).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorAxpy( double        alpha,
                     hypre_Vector *x,
                     hypre_Vector *y )
{
   double  *x_data = hypre_VectorData(x);
   double  *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);

   HYPRE_Int      i;

   HYPRE_Int      ierr = 0;

   size *= hypre_VectorNumVectors(x);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
      y_data[i] += alpha * x_data[i];

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInnerProd
 *
 * Returns <x, y> summed over all component vectors.
 *--------------------------------------------------------------------------*/

double
hypre_SeqVectorInnerProd( hypre_Vector *x,
                          hypre_Vector *y )
{
   double  *x_data = hypre_VectorData(x);
   double  *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);

   HYPRE_Int      i;

   double      result = 0.0;

   size *= hypre_VectorNumVectors(x);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
      result += y_data[i] * x_data[i];

   return result;
}

/*--------------------------------------------------------------------------
 * hypre_VectorSumElts:
 * Returns the sum of all vector elements.
 *--------------------------------------------------------------------------*/

double hypre_VectorSumElts( hypre_Vector *vector )
{
   double sum = 0;
   double * data = hypre_VectorData( vector );
   HYPRE_Int size = hypre_VectorSize( vector );
   HYPRE_Int i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for ( i = 0; i < size; ++i ) sum += data[i];

   return sum;
}
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/

/*
  Global declarations: the cache component lock, whether anonymous memory
  mapping is preferred (-1 = not yet determined), and the epoch used to
  expire temporary disk caches.
*/
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

static ssize_t
  cache_anonymous_memory = (-1);

static time_t
  cache_epoch = 0;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   A c q u i r e P i x e l C a c h e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCache() acquires a pixel cache.
%
%  The format of the AcquirePixelCache() method is:
%
%      Cache AcquirePixelCache(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  /* no backing store yet; the concrete type is chosen when the cache opens */
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    One nexus per potential worker thread: take the largest of the caller's
    request, the OpenMP maximum, and the thread resource limit (minimum 1).
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* the environment variable is read first; the policy value overrides it */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  /* clamp resource limits so geometry arithmetic cannot overflow ssize_t */
  cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e P i x e l C a c h e N e x u s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
%  The format of the AcquirePixelCacheNexus method is:
%
%      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
% */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2* number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads, 2*sizeof(**nexus_info)); if (*nexus_info == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) (2*number_threads); i++) { nexus_info[i]=(*nexus_info+i); if (i < (ssize_t) number_threads) nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. % % The format of the AcquirePixelCachePixels() method is: % % void *AcquirePixelCachePixels(const Image *image,size_t *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); (void) exception; cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=(size_t) cache_info->length; return(cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. 
%
%  The format of the CacheComponentTerminus() method is:
%
%      CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /* ensure the semaphore exists before relinquishing it */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l i p P i x e l C a c h e N e x u s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
%  mask.  The method returns MagickTrue if the pixel region is clipped,
%  otherwise MagickFalse.
%
%  The format of the ClipPixelCacheNexus() method is:
%
%      MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask: blend the nexus pixels (q) over the committed cache
    pixels (p) weighted by the per-pixel write mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);  /* no write mask: nothing to clip */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* read the authentic pixels for the same region via the virtual nexus */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          /* composite each updatable channel with the masked alpha */
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l o n e P i x e l C a c h e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCache() clones a pixel cache.
%
%  The format of the ClonePixelCache() method is:
%
%      Cache ClonePixelCache(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* copy the whole method table in one struct assignment */
  source_info->methods=cache_info->methods;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l o n e P i x e l C a c h e R e p o s i t o r y                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheRepository() clones the source pixel cache to the destination
%  cache.
%
%  The format of the ClonePixelCacheRepository() method is:
%
%      MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
%        CacheInfo *source_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o source_info: the source pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    {
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
      /*
        Fast path: kernel-side copy; 0x7ffff000 is the Linux sendfile()
        per-call transfer limit.  On a short transfer, rewind and fall back
        to the read/write loop below.
      */
      if (cache_info->length < 0x7ffff000)
        {
          count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
            (size_t) cache_info->length);
          if (count == (ssize_t) cache_info->length)
            return(MagickTrue);
          if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
              (lseek(clone_info->file,0,SEEK_SET) < 0))
            return(MagickFalse);
        }
#endif
      quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
    }
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  /* portable fallback: buffered read/write copy loop */
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads  ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);  /* ping caches have no pixels to copy */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology: a flat byte copy suffices.
      */
      if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through nexuses,
    remapping channels where necessary.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining iterations */
    if (y >= (ssize_t) clone_info->rows)
      continue;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      /* same channel layout: bulk copy the overlapping row prefix */
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        const Quantum
          *magick_restrict p;

        Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: translate channel by channel.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent with the same row-by-row scheme.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y I m a g e P i x e l C a c h e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyImagePixelCache() method is:
%
%      void DestroyImagePixelCache(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y I m a g e P i x e l s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixels() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyImagePixels() method is:
%
%      void DestroyImagePixels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* prefer the registered destroy handler when one is installed */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y P i x e l C a c h e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyPixelCache() method is:
%
%      Cache DestroyPixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /* release the backing store appropriate to the cache type */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fallthrough: a map cache is backed by a disk file that must be closed */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}

MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* drop a reference under the lock; only the last reference destroys */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* invalidate the signature to catch use-after-destroy */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
% */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) (2*number_threads); i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1)) { SyncImagePixelCache((Image *) image,exception); cache_info=(CacheInfo *) image->cache; } if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. 
% % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. 
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Hand back the pixels of this thread's per-thread nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated
%  corresponding with the last call to QueueAuthenticPixels() or
%  GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Delegate to an installed handler when one exists; otherwise fall back to
    this thread's nexus pixels.
  */
  if (info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(info->methods.get_authentic_pixels_from_handler(image));
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access. If the
%  region is successfully accessed, a pointer to a Quantum array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory. Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file. The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum. If the image has corresponding metacontent, call
%  GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
%  meta-content corresponding to the region.  Once the Quantum array has
%  been updated, the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Prefer an installed handler; otherwise fetch through this thread's nexus.
  */
  if (info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(info->methods.get_authentic_pixels_handler(image,x,y,columns,rows,
      exception));
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l s C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.   A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  if (info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  /* resolve the region through this thread's private cache nexus */
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtent() returns the extent of the pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetImageExtent() method is:
%
%      MagickSizeType GetImageExtent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* extent of this thread's nexus region (or the full cache if unset) */
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ValidatePixelCacheMorphology() answers: does the pixel cache still match the
  image's geometry, colorspace, and channel layout?  A mismatch means the
  cache must be reopened before it can be used.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
      (image->metacontent_extent != cache_info->metacontent_extent) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* process-wide state: throttle + time-limit bookkeeping (lazy init) */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* optionally yield the CPU every 32nd call when throttling is enabled */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /* time-limit resource exceeded: close any disk cache and bail out */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /* copy-on-write: a shared or read-only cache must be cloned before writes */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      /* re-check under the cache lock (double-checked under semaphore) */
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          /* shallow stack copy of the image drives the clone's OpenPixelCache */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* copy the pixel data too when the caller asked for a clone */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* drop our reference to the old (now replaced) cache */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MemoryCache, MapCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
% % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. 
The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /* delegate to an installed handler when one exists */
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e V i r t u a l P i x e l F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /* resolve a 1x1 virtual region through this thread's nexus */
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l I n f o                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y: these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  /* convert the packed quantum pixel into the caller's PixelInfo */
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      Colorspace GetPixelCacheColorspace(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel accessors.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=
    GetOneVirtualPixelFromCache;
  /*
    Authentic (read/write) pixel accessors.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /*
    Queue/sync/destroy lifecycle handlers.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  corresponding with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict info;

  MagickSizeType
    region_extent;

  assert(cache != NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  /* an empty nexus region means the whole cache */
  region_extent=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  if (region_extent != 0)
    return(region_extent);
  return((MagickSizeType) info->columns*info->rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict info;

  size_t
    row_bytes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Square tiles sized by the per-pixel byte footprint; disk caches get a
    larger tile to amortize I/O.
  */
  row_bytes=MagickMax(info->number_channels,1)*sizeof(Quantum);
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/row_bytes;
  else
    *width=2048UL/row_bytes;
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. 
% % The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    An uninitialized cache has no meta-content to expose.
  */
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content are not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict meta;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer the installed meta-content handler; fall back on the nexus
    reserved for this OpenMP thread.
  */
  meta=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (meta != (void *) NULL)
    return(meta);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCacheNexus() method is:
%
%      Quantum *GetVirtualPixelCacheNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  8x8 ordered-dither offset table used by DitherVirtualPixelMethod
  (values 0..63; 32 is subtracted so offsets are centered around zero).
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

/* Map x to a dithered column, clamped to [0,columns-1]. */
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

/* Map y to a dithered row, clamped to [0,rows-1]. */
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

/* Clamp x to the valid column range [0,columns-1]. */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/* Clamp y to the valid row range [0,rows-1]. */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/* Uniformly random column index in [0,columns). */
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

/* Uniformly random row index in [0,rows). */
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

/*
  Floored division of offset by extent: remainder is always in [0,extent),
  as required for tiling/mirroring of negative offsets.  NOTE(review):
  extent == 0 would make the `%' below divide by zero -- callers pass image
  dimensions, presumably nonzero; confirm.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  modulo.quotient=offset;
  if (extent != 0)
    modulo.quotient=offset/((ssize_t) extent);
  modulo.remainder=offset % ((ssize_t) extent);
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}

/*
  Acquire a (possibly out-of-bounds) region of virtual pixels: in-bounds
  requests are serviced directly from the cache; out-of-bounds pixels are
  synthesized per the virtual pixel method (edge/tile/mirror/random/
  constant-color, etc.).  Returns the nexus pixels or NULL on failure.
*/
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  const Quantum
    *magick_restrict p;

  const void
    *magick_restrict r;

  Quantum
    *magick_restrict q;

  ssize_t
    i,
    u;

  unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      /*
        Pre-compute the constant fill pixel used by the constant-color
        virtual pixel methods.
      */
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /* BackgroundVirtualPixelMethod and friends: use background color. */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Assemble the request row by row; each row is copied in maximal in-bounds
    runs, with out-of-bounds pixels synthesized one at a time.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Lazily create the RNG on first random virtual access. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tile quotient => reflect the coordinate. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant fill computed above. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            NOTE(review): only `length' bytes are copied here, yet `s'
            advances by length*metacontent_extent; presumably the copy should
            cover length*metacontent_extent bytes as in the single-pixel
            path above -- confirm against upstream.
          */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
%  cache as defined by the geometry parameters.
A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const Quantum *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Service the request from the nexus reserved for this OpenMP thread.
  */
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelsHandler
    handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Use the installed handler when one is registered; otherwise read from
    the nexus reserved for this OpenMP thread.
  */
  handler=cache_info->methods.get_virtual_pixels_handler;
  if (handler != (GetVirtualPixelsHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the
%  region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
%  safe.  In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
% % The format of the GetVirtualPixels() method is:
%
%      const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelHandler
    handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Use the installed handler when one is registered; otherwise service the
    request from the nexus reserved for this OpenMP thread.
  */
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated corresponding with the
%  last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      Quantum *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. 
% % The format of the MaskPixelCacheNexus() method is:
%
%      MagickBooleanType MaskPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Blend pixel q over p weighted by the composite-mask alpha/beta pair;
  a fully opaque mask (alpha == OpaqueAlpha) short-circuits to p.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    mask_alpha;

  Quantum
    pixel;

  if (fabs((double) (alpha-OpaqueAlpha)) < MagickEpsilon)
    return(p);
  mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
  mask_alpha=PerceptibleReciprocal(mask_alpha);
  pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,
    beta));
  return(pixel);
}

/*
  Blend the nexus pixels (q) with the authentic cache pixels (p) under the
  image's composite mask; only channels carrying UpdatePixelTrait are
  modified.  Returns MagickFalse if the authentic pixels cannot be read.
*/
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      mask_alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
          GetPixelAlpha(image,q));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p e n P i x e l C a c h e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenPixelCache() allocates the pixel cache.  This includes defining the cache
%  dimensions, allocating space for the image pixels and optionally the
%  metacontent, and memory mapping the cache if it is disk based.  The cache
%  nexus array is initialized as well.
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Open (or create) the disk-backed pixel cache file in the requested mode.
  Reuses an already-open descriptor when the mode matches; on a mode change
  the old descriptor is closed after the new one is opened successfully.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back on opening an existing file. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* Try exclusive create first; fall back on opening an existing file. */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

/*
  Write `length' bytes from buffer to the cache file at `offset', retrying
  short writes and EINTR.  Returns the number of bytes written (which may be
  short on a persistent error) or -1 if the initial seek fails.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

static MagickBooleanType SetPixelCacheExtent(Image
*image,MagickSizeType length)
{
  /* NOTE(review): this is the tail of a disk-extent helper whose signature
     begins before this chunk; it grows the on-disk cache file to `length`
     bytes. */
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Reject lengths that cannot round-trip through a signed file offset.
  */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already long enough */
  else
    {
      /*
        Extend the file by writing one byte at the last requested position.
      */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* Optionally reserve the backing blocks up front. */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* Rewind so subsequent reads/writes start at the beginning. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
  OpenPixelCache() allocates the pixel cache backing store for an image.
  It tries, in order: a ping cache (no pixels), anonymous/heap memory, a
  distributed cache server (when the "cache:hosts" registry entry is set and
  memory failed), a file-backed memory map, and finally a plain disk file.
  Returns MagickTrue on success; on failure the cache type is reset to
  UndefinedCache and an exception is raised.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
    {
      length=GetImageListLength(image);
      if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
          image->filename);
    }
  /*
    Snapshot the current cache so existing pixels can be cloned into the new
    backing store (or restored on allocation failure).
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /*
    Overflow check: dividing back out must reproduce the column count.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* Ping mode: record geometry only, allocate no pixels. */
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always go to disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* Policy demands anonymous mapping rather than heap memory. */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* Allocation failed: restore the snapshot and fall through. */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /* Distributed cache was the last resort for this configuration. */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* Start with a fresh cache file rather than reusing the old one. */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->type=DiskCache;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length == (MagickSizeType) ((size_t) length))
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status != MagickFalse)
        {
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* Mapping failed: restore snapshot, stay with plain DiskCache. */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
              RelinquishMagickResource(MapResource,cache_info->length);
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  /*
    Plain disk cache (no memory map).
  */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; zero clones the current cache into a new persistent one.
%
%    o offset: the offset in the persistent cache to store pixels; on return
%      it is advanced past this image's pixels, rounded up to a page boundary.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* Advance past this image, rounded up to the next page boundary. */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l   C a c h e   N e x u s         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates an
region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* Reject regions whose far corner falls outside the cache. */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l s   C a c h e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates an region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();  /* selects this thread's private nexus */

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e   A u t h e n t i c   P i x e l s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or Quantum) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();  /* selects this thread's private nexus */

  Quantum
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to an installed handler (e.g. a cache-method override). */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y,
        columns,rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d   P i x e l   C a c h e   M e t a c o n t e n t                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheMetacontent() reads metacontent from the specified region of
%  the pixel cache.
%
%  The format of the ReadPixelCacheMetacontent() method is:
%
%      MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the metacontent.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ReadPixelCacheRegion() reads up to `length` bytes from the cache file at
  `offset` into `buffer`, retrying on EINTR, and returns the number of bytes
  actually read (or -1 when the initial seek fails on non-pread platforms).
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /* Retry only on interrupted system calls; EOF/error ends the loop. */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

/*
  ReadPixelCacheMetacontent() copies the nexus region's metacontent out of the
  backing store (memory, disk, or distributed server) into the nexus buffer.
  Returns MagickFalse when the image has no metacontent or the read fails.
*/
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  ssize_t
    y;

  unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* The nexus already points into the cache: nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: copy all rows in one memcpy. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* Metacontent is stored after all pixel data in the cache file. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* fetch one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short read leaves y short of rows: report failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d   P i x e l   C a c h e   P i x e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ReadPixelCachePixels() copies the nexus region's pixels out of the backing
  store (memory, disk, or distributed server) into the nexus pixel buffer.
  Returns MagickFalse on arithmetic overflow or a short read.
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  Quantum
    *magick_restrict q;

  ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* The nexus already points into the cache: nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* Divide back out to detect multiplication overflow. */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: copy all rows in one memcpy. */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* fetch one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short read leaves y short of rows: report failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double)
      nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e   P i x e l   C a c h e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Reference count is guarded by the cache semaphore. */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   P i x e l   C a c h e   C h a n n e l s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Re-derive the channel count from the image's current channel map. */
  cache_info->number_channels=GetPixelChannels(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   C a c h e   A n o n y m o u s   M e m o r y                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
%  The format of the ResetCacheAnonymousMemory method is:
%
%      void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* Force the security-policy lookup in OpenPixelCache() to run again. */
  cache_anonymous_memory=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   P i x e l   C a c h e   E p o c h                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheEpoch() resets the pixel cache epoch.
%
%  The format of the ResetPixelCacheEpoch method is:
%
%      void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  cache_epoch=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t   P i x e l   C a c h e   M e t h o d s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
%  The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/

/*
  SetPixelCacheMethods() installs each handler supplied in cache_methods into
  the cache's method table.  A handler is only installed when the new table
  provides a non-NULL value, so unspecified handlers keep their defaults.
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    Bug fix: load the candidate handler from cache_methods (the new method
    table), not from cache_info->methods.  The original read the destination's
    *current* handler, so a non-NULL new handler was skipped whenever the
    current one was NULL, and a NULL new handler could overwrite a non-NULL
    current one -- inconsistent with every other handler in this function,
    including the parallel authentic-pixel case below.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t   P i x e l   C a c h e   N e x u s   P i x e l s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(
%        const CacheInfo *magick_restrict cache_info,const MapMode mode,
%        const ssize_t x,const ssize_t y,const size_t width,const size_t height,
%        const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o x,y,width,height: define the region of this particular cache nexus.
%
%    o buffered: if true, nexus pixels are buffered.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  AcquireCacheNexusPixels() allocates a staging buffer of `length' bytes for
  the nexus, either from the heap or via an anonymous memory map depending on
  the cache_anonymous_memory policy.  Returns MagickFalse (and throws) on
  overflow or allocation failure.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /* reject lengths that do not fit in a size_t (allocation would truncate) */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      /* heap path: aligned allocation, zero-filled */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /* anonymous memory-map path (fd of -1) */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}

/*
  PrefetchPixelCacheNexusPixels() issues a CPU cache prefetch hint for the
  nexus pixels; read hint for ReadMode, write hint otherwise.  Skipped for
  regions smaller than one cache line.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
        0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1);
}

/*
  ValidatePixelOffset() returns MagickFalse if x +/- a would overflow the
  ssize_t range (guards subsequent offset arithmetic).
*/
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  if ((x >= 0) && (x >= ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a)))
    return(MagickFalse);
  if (x <= ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a))
    return(MagickFalse);
  return(MagickTrue);
}

static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  /* enforce policy limits and guard against ssize_t offset overflow */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is possible only for an in-bounds region that is either
        a run of full rows (x == 0, full width) or a single partial row.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
           (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer too small; grow it */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    /* metacontent staging area follows the pixel data in the same buffer */
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.  A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SetCacheAlphaChannel() enables the image's alpha trait and sets every
  pixel's alpha component to `alpha', row by row (parallelized with OpenMP
  when available).  Returns MagickFalse if any row fails to sync.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* remember previous method so it can be returned to the caller */
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* materialize alpha/colorspace support the background color needs */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c O p e n C L B u f f e r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
%  been completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* only memory caches backed by an OpenCL buffer need copying */
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e N e x u s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* apply write/composite masks before the pixels are committed */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* nexus points directly at cache memory; nothing to write back */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
%  or disk cache.  The method returns MagickTrue if the pixel region is synced,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sync this thread's cache nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* delegate to the per-nexus sync for the calling thread's nexus */
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c A u t h e n t i c P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* an installed handler (e.g. a custom cache) takes precedence */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* GetImagePixelCache() performs the actual sync; NULL signals failure */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e P i x e l C a c h e M e t a c o n t e n t                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified region
%  of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const unsigned char
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* direct-access nexuses already point at cache memory */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      /* full-width regions collapse to a single contiguous copy */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* metacontent is stored after all pixel data in the cache file */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* a short write leaves y < rows: report failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e C a c h e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const Quantum
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /* direct-access nexuses already point at cache memory */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      /* full-width regions collapse to a single contiguous copy */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* a short write leaves y < rows: report failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
GB_AxB_rowscale_template.c
//------------------------------------------------------------------------------
// GB_AxB_rowscale_template: C=D*B where D is a square diagonal matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// This template is not used If C is iso, since all that is needed is to create
// C as a shallow-copy of the pattern of A.

// B and C can be jumbled.  D cannot, but it is a diagonal matrix so it is
// never jumbled.

// NOTE(review): GB_ATYPE/GB_BTYPE, GB_GETA/GB_GETB, GB_BINOP, GB_CX, and the
// nthreads variable are supplied by the file that #include's this template.

{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (GB_JUMBLED_OK (C)) ;
    ASSERT (!GB_JUMBLED (D)) ;
    ASSERT (GB_JUMBLED_OK (B)) ;
    ASSERT (!C->iso) ;

    //--------------------------------------------------------------------------
    // get D and B
    //--------------------------------------------------------------------------

    #if !GB_A_IS_PATTERN
    const GB_ATYPE *restrict Dx = (GB_ATYPE *) D->x ;
    #endif
    const bool D_iso = D->iso ;
    #if !GB_B_IS_PATTERN
    const GB_BTYPE *restrict Bx = (GB_BTYPE *) B->x ;
    #endif
    const bool B_iso = B->iso ;
    const int64_t *restrict Bi = B->i ;
    const int64_t bnz = GB_nnz (B) ;
    const int64_t bvlen = B->vlen ;

    //--------------------------------------------------------------------------
    // C=D*B
    //--------------------------------------------------------------------------

    // one task per thread, but never more tasks than entries in B
    int ntasks = nthreads ;
    ntasks = GB_IMIN (bnz, ntasks) ;

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        // each task owns a contiguous slice [pstart,pend) of B's entries
        int64_t pstart, pend ;
        GB_PARTITION (pstart, pend, bnz, tid, ntasks) ;
        GB_PRAGMA_SIMD_VECTORIZE
        for (int64_t p = pstart ; p < pend ; p++)
        {
            int64_t i = GBI (Bi, p, bvlen) ;        // get row index of B(i,j)
            GB_GETA (dii, Dx, i, D_iso) ;           // dii = D(i,i)
            GB_GETB (bij, Bx, p, B_iso) ;           // bij = B(i,j)
            GB_BINOP (GB_CX (p), dii, bij, 0, 0) ;  // C(i,j) = dii*bij
        }
    }
}
GB_unaryop__identity_int8_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int8_int64
// op(A') function:  GB_tran__identity_int8_int64

// C type:   int8_t
// A type:   int64_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int8_int64
(
    int8_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise, so each entry is independent: parallelize over entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is supplied by the shared template below
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_uint16_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_int16) // op(A') function: GB (_unop_tran__identity_uint16_int16) // C type: uint16_t // A type: int16_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = (uint16_t) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_int16) ( uint16_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if 
GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/fx-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/opencl-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. 
*/
/*
  Multi-character operators are rewritten by AcquireFxInfo() into single
  bytes starting at 0xd9 so the expression parser can treat each operator
  as one character.  The enumerators are sequential from 0xd9.
*/
typedef enum
{
  BitwiseAndAssignmentOperator = 0xd9U,  /* "&=" */
  BitwiseOrAssignmentOperator,           /* "|=" */
  LeftShiftAssignmentOperator,           /* "<<=" */
  RightShiftAssignmentOperator,          /* ">>=" */
  PowerAssignmentOperator,               /* "^=" */
  ModuloAssignmentOperator,              /* "%=" */
  PlusAssignmentOperator,                /* "+=" */
  SubtractAssignmentOperator,            /* "-=" */
  MultiplyAssignmentOperator,            /* "*=" */
  DivideAssignmentOperator,              /* "/=" */
  IncrementAssignmentOperator,           /* "++" */
  DecrementAssignmentOperator,           /* "--" */
  LeftShiftOperator,                     /* "<<" */
  RightShiftOperator,                    /* ">>" */
  LessThanEqualOperator,                 /* "<=" */
  GreaterThanEqualOperator,              /* ">=" */
  EqualOperator,                         /* "==" */
  NotEqualOperator,                      /* "!=" */
  LogicalAndOperator,                    /* "&&" */
  LogicalOrOperator,                     /* "||" */
  ExponentialNotation                    /* "**" */
} FxOperator;

/*
  State threaded through FX expression evaluation; allocated by
  AcquireFxInfo() and released by DestroyFxInfo().
*/
struct _FxInfo
{
  const Image
    *images;        /* image sequence the expression operates on */

  char
    *expression;    /* operator-encoded copy of the expression text */

  FILE
    *file;          /* output stream (set to stderr in AcquireFxInfo) */

  SplayTreeInfo
    *colors,        /* cache of named colors referenced by the expression */
    *symbols;       /* user variables and cached channel statistics */

  CacheView
    **view;         /* one virtual pixel cache view per image in the list */

  RandomInfo
    *random_info;   /* random number generator state */

  ExceptionInfo
    *exception;     /* exception sink for errors raised during evaluation */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireFxInfo() allocates the FxInfo structure.
%
%  The format of the AcquireFxInfo method is:
%
%      FxInfo *AcquireFxInfo(Image *images,const char *expression)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o expression: the expression.
% */
MagickExport FxInfo *AcquireFxInfo(const Image *images,const char *expression)
{
  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  unsigned char
    fx_op[2];  /* single-byte encoded operator plus NUL terminator */

  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual pixel cache view per image in the sequence.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,fx_info->exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  /*
    Convert compound to simple operators.  Each multi-character operator is
    replaced by its one-byte FxOperator code so the parser can treat it as a
    single character.  Order matters: longer operators are rewritten before
    operators that are a prefix or substring of them (e.g. "<<=" before "<<"
    and "<=").
  */
  fx_op[1]='\0';
  *fx_op=(unsigned char) BitwiseAndAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
  *fx_op=(unsigned char) BitwiseOrAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
  *fx_op=(unsigned char) PowerAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
  *fx_op=(unsigned char) ModuloAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
  *fx_op=(unsigned char) PlusAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
  *fx_op=(unsigned char) SubtractAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
  *fx_op=(unsigned char) MultiplyAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
  *fx_op=(unsigned char) DivideAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
  *fx_op=(unsigned char) IncrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
  *fx_op=(unsigned char) DecrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
  *fx_op=(unsigned char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
  *fx_op=(unsigned char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
  *fx_op=(unsigned char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
  *fx_op=(unsigned char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
  *fx_op=(unsigned char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
  *fx_op=(unsigned char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
  *fx_op=(unsigned char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
  /*
    Force right-to-left associativity for unary negation: rewrite "-" as
    "-1.0*", then undo the rewrite where it would corrupt exponents ("^-")
    and scientific notation ("E-"/"e-").
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
% */
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /*
    Release every resource acquired by AcquireFxInfo(), then the FxInfo
    itself; always returns NULL so callers can clear their pointer.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
%        const ChannelType channel,const ssize_t x,const ssize_t y,
%        double *alpha,Exceptioninfo *exception)
%      MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,double *alpha,
%        Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline const double *GetFxSymbolValue(FxInfo *fx_info,const char *symbol) { return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol)); } static inline MagickBooleanType SetFxSymbolValue( FxInfo *magick_restrict fx_info,const char *magick_restrict symbol, const double value) { double *object; object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol); if (object != (double *) NULL) { *object=value; return(MagickTrue); } object=(double *) AcquireMagickMemory(sizeof(*object)); if (object == (double *) NULL) { (void) ThrowMagickException(fx_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", fx_info->images->filename); return(MagickFalse); } *object=value; return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object)); } static double FxChannelStatistics(FxInfo *fx_info,const Image *image, ChannelType channel,const char *symbol,ExceptionInfo *exception) { char channel_symbol[MaxTextExtent], key[MaxTextExtent]; const double *value; double statistic; register const char *p; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; *channel_symbol='\0'; if (*p == '.') { ssize_t option; (void) CopyMagickString(channel_symbol,p+1,MaxTextExtent); option=ParseCommandOption(MagickChannelOptions,MagickTrue,channel_symbol); if (option >= 0) channel=(ChannelType) option; } (void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=GetFxSymbolValue(fx_info,key); if (value != (const double *) NULL) return(QuantumScale*(*value)); statistic=0.0; if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageChannelDepth(image,channel,exception); statistic=(double) depth; } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness, exception); statistic=kurtosis; } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) 
GetImageChannelRange(image,channel,&minima,&maxima,exception); statistic=maxima; } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageChannelMean(image,channel,&mean,&standard_deviation, exception); statistic=mean; } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageChannelRange(image,channel,&minima,&maxima,exception); statistic=minima; } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness, exception); statistic=skewness; } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageChannelMean(image,channel,&mean,&standard_deviation, exception); statistic=standard_deviation; } if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse) return(0.0); return(QuantumScale*statistic); } static double FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static inline MagickBooleanType IsFxFunction(const char *expression, const char *name,const size_t length) { int c; register size_t i; for (i=0; i <= length; i++) if (expression[i] == '\0') return(MagickFalse); c=expression[length]; if ((LocaleNCompare(expression,name,length) == 0) && ((isspace(c) == 0) || (c == '('))) return(MagickTrue); return(MagickFalse); } static inline double FxGCD(const double alpha,const double beta) { if (alpha < beta) return(FxGCD(beta,alpha)); if (fabs(beta) < 0.001) return(alpha); return(FxGCD(beta,alpha-beta*floor(alpha/beta))); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) 
*subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const ChannelType channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, ExceptionInfo *exception) { char *q, symbol[MaxTextExtent]; const char *artifact, *p; const double *value; double alpha, beta; Image *image; MagickBooleanType status; MagickPixelPacket pixel; PointInfo point; register ssize_t i; size_t level; p=expression; i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { char *subexpression; subexpression=AcquireString(expression); if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); i=(ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x=alpha; point.y=beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); 
point.x+=alpha; point.y+=beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression=DestroyString(subexpression); } image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } i=GetImageIndexInList(image); GetMagickPixelPacket(image,&pixel); status=InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); (void) status; if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MaxTextExtent]; size_t length; (void) CopyMagickString(name,p,MaxTextExtent); length=strlen(name); for (q=name+length-1; q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } q=name; if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') && (GetFxSymbolValue(fx_info,name) == (const double *) NULL)) { MagickPixelPacket *color; color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors, name); if (color != (MagickPixelPacket *) NULL) { pixel=(*color); p+=length; } else if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString(name), CloneMagickPixelPacket(&pixel)); p+=length; } } } (void) CopyMagickString(symbol,p,MaxTextExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedChannel: return(QuantumScale*pixel.red); case GreenChannel: return(QuantumScale*pixel.green); case BlueChannel: return(QuantumScale*pixel.blue); case OpacityChannel: { double alpha; if (pixel.matte == MagickFalse) return(1.0); alpha=(double) (QuantumScale*GetPixelAlpha(&pixel)); return(alpha); } case IndexChannel: { if (image->colorspace != CMYKColorspace) { (void) 
ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.index); } case DefaultChannels: return(QuantumScale*GetMagickPixelIntensity(image,&pixel)); default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((double) (QuantumScale*GetPixelAlpha(&pixel))); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (IsFxFunction(symbol,"channel",7) != MagickFalse) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case OpacityChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BlueChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case OpacityChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case IndexChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) 
return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'E': case 'e': { if (LocaleCompare(symbol,"extent") == 0) { if (image->extent != 0) return((double) image->extent); return((double) GetBlobSize(image)); } break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.index); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->x_resolution); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->y_resolution); if (LocaleCompare(symbol,"intensity") == 0) return(QuantumScale*GetMagickPixelIntensity(image,&pixel)); if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double 
hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminance; luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminance); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.opacity); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) image->page.y); if (LocaleCompare(symbol,"printsize.x") == 0) return(PerceptibleReciprocal(image->x_resolution)*image->columns); if (LocaleCompare(symbol,"printsize.y") == 0) return(PerceptibleReciprocal(image->y_resolution)*image->rows); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->x_resolution); if (LocaleCompare(symbol,"resolution.y") == 0) 
return(image->y_resolution); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green), ClampToQuantum(pixel.blue),&hue,&saturation,&lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) { double depth; depth=(double) GetImageChannelDepth(image,channel,fx_info->exception); return(depth); } break; } default: break; } value=GetFxSymbolValue(fx_info,symbol); if (value != (const double *) NULL) return(*value); artifact=GetImageArtifact(image,symbol); if (artifact != (const char *) NULL) return(StringToDouble(artifact,(char **) NULL)); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UndefinedVariable","`%s'",symbol); (void) SetFxSymbolValue(fx_info,symbol,0.0); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } 
FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c=(-1); level=0; subexpression=(const char *) NULL; target=NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { expression+=5; break; } #endif if (IsFxFunction(expression,"atan2",5) != MagickFalse) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit(c) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((IsFxFunction(expression,"j0",2) != MagickFalse) || (IsFxFunction(expression,"j1",2) != MagickFalse)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit(c) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit(c) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) 
NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case BitwiseAndAssignmentOperator: case BitwiseOrAssignmentOperator: case LeftShiftAssignmentOperator: case RightShiftAssignmentOperator: case PowerAssignmentOperator: case ModuloAssignmentOperator: case PlusAssignmentOperator: case SubtractAssignmentOperator: case MultiplyAssignmentOperator: case DivideAssignmentOperator: case IncrementAssignmentOperator: case DecrementAssignmentOperator: { precedence=AssignmentPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. 
*/ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info,const ChannelType channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, double *beta,ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 200 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } #define FxParseConditional(subexpression,sentinal,p,q) \ { \ p=subexpression; \ for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \ if (*q == '(') \ { \ for (q++; (*q != ')') && (*q != '\0'); q++); \ if (*q == '\0') \ break; \ } \ if (*q == '\0') \ { \ (void) ThrowMagickException(exception,GetMagickModule(), \ OptionError,"UnableToParseExpression","`%s'",subexpression); \ FxReturn(0.0); \ } \ if (strlen(q) == 1) \ *(q+1)='\0'; \ *q='\0'; \ } char *q, *subexpression; double alpha, gamma, sans, value; register const char *p; *beta=0.0; sans=0.0; subexpression=AcquireString(expression); *subexpression='\0'; if (depth > FxMaxSubexpressionDepth) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { 
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case BitwiseAndAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { 
q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=pow(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != 
'\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=fmod(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) 
ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 
1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { double gamma; (void) CopyMagickString(subexpression,++p,MaxTextExtent-1); FxParseConditional(subexpression,':',p,q); if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(gamma); } case '=': { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); length=CopyMagickString(subexpression,expression+1,MaxTextExtent); if (length != 0) subexpression[length-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); 
FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; size_t length; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch 
(channel) { case CyanChannel: type="cyan"; break; case MagentaChannel: type="magenta"; break; case YellowChannel: type="yellow"; break; case AlphaChannel: type="alpha"; break; case BlackChannel: type="black"; break; default: type="unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedChannel: type="gray"; break; case AlphaChannel: type="alpha"; break; default: type="unknown"; break; } break; } default: { switch (channel) { case RedChannel: type="red"; break; case GreenChannel: type="green"; break; case BlueChannel: type="blue"; break; case AlphaChannel: type="alpha"; break; default: type="unknown"; break; } break; } } *subexpression='\0'; length=1; if (strlen(expression) > 6) length=CopyMagickString(subexpression,expression+6,MaxTextExtent); if (length != 0) subexpression[length-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file, "%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename, (double) x,(double) y,type,subexpression,GetMagickPrecision(), (double) alpha); FxReturn(alpha); } if (IsFxFunction(expression,"do",2) != MagickFalse) { size_t length; /* Parse do(expression,condition test). 
*/ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"for",3) != MagickFalse) { double sans = 0.0; size_t length; /* Parse for(initialization, condition test, expression). 
*/ length=CopyMagickString(subexpression,expression+4, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI)); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { double gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gcd=FxGCD(alpha,*beta); FxReturn(gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression,"if",2) != MagickFalse) { double sans = 0.0; size_t length; length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); if (fabs(alpha) >= MagickEpsilon) alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(alpha); } if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha))); } #endif break; } case 'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); 
FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn((double) QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta)); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn((double) QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { double alpha; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if ((alpha-floor(alpha)) < (ceil(alpha)-alpha)) 
FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? -1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha)); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if 
(IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { size_t length; /* Parse while(condition,expression). */ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } default: break; } q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception); FxReturn(alpha); } MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception); return(status); } 
/*
  FxPreprocessExpression() evaluates the expression once (gray channel, pixel
  (0,0)) with the debug stream detached, so any debug() terms in the
  expression produce no output; used to validate an expression before the
  per-pixel evaluation loop.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  /* temporarily detach the debug stream to suppress debug() output */
  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=file;
  return(status);
}

/*
  FxEvaluateChannelExpression() evaluates the primary fx expression at pixel
  (x,y) for the given channel, storing the result in *alpha.  Returns
  MagickFalse only when evaluation raised an OptionError (parse failure);
  other severities still return MagickTrue.
*/
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,double *alpha,
  ExceptionInfo *exception)
{
  double
    beta;

  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%      Image *FxImageChannel(const Image *image,const ChannelType channel,
%        const char *expression,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
% */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; double alpha; FxInfo **fx_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { Image *fx_image; fx_image=FxImageChannel(image,GrayChannel,expression,exception); return(fx_image); } MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel, const char *expression,ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == 
MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse) { InheritException(exception,&fx_image->exception); fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. */ status=MagickTrue; progress=0; fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows,1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); double alpha; register IndexPacket *magick_restrict fx_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view); alpha=0.0; for (x=0; x < (ssize_t) fx_image->columns; x++) { if ((channel & RedChannel) != 0) { (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y, &alpha,exception); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*alpha)); } if ((channel & GreenChannel) != 0) { (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y, &alpha,exception); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*alpha)); } if ((channel & BlueChannel) != 0) { (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y, &alpha,exception); 
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*alpha)); } if ((channel & OpacityChannel) != 0) { (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y, &alpha,exception); if (image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum((MagickRealType) QuantumRange* alpha)); else SetPixelOpacity(q,ClampToQuantum((MagickRealType) (QuantumRange- QuantumRange*alpha))); } if (((channel & IndexChannel) != 0) && (fx_image->colorspace == CMYKColorspace)) { (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y, &alpha,exception); SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType) QuantumRange*alpha)); } q++; } if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FxImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); }
/* ===== t_cholmod_gpu.c (next concatenated source file) ===== */
/* ========================================================================== */ /* === GPU/t_cholmod_gpu ==================================================== */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis * http://www.suitesparse.com * -------------------------------------------------------------------------- */ /* GPU BLAS template routine for cholmod_super_numeric. */ /* ========================================================================== */ /* === include files and definitions ======================================== */ /* ========================================================================== */ #ifdef GPU_BLAS #include <string.h> #include "cholmod_template.h" #undef L_ENTRY #ifdef REAL #define L_ENTRY 1 #else #define L_ENTRY 2 #endif /* ========================================================================== */ /* === gpu_clear_memory ===================================================== */ /* ========================================================================== */ /* * Ensure the Lx is zeroed before forming factor. This is a significant cost * in the GPU case - so using this parallel memset code for efficiency. 
 */

/* Zero `size` doubles at `buff` using `num_threads` OpenMP threads.
 * The buffer is split into num_threads*5 chunks handed out dynamically;
 * the last chunk also absorbs the remainder from the integer division. */
void TEMPLATE2 (CHOLMOD (gpu_clear_memory))
(
    double* buff,       /* host buffer to clear */
    size_t size,        /* number of doubles (not bytes) */
    int num_threads     /* OpenMP threads to use */
)
{
    int chunk_multiplier = 5;
    int num_chunks = chunk_multiplier * num_threads;
    size_t chunksize = size / num_chunks;
    size_t i;

#pragma omp parallel for num_threads(num_threads) private(i) schedule(dynamic)
    for(i = 0; i < num_chunks; i++) {
        size_t chunkoffset = i * chunksize;
        if(i == num_chunks - 1) {
            /* last chunk: clear everything not covered by the first
             * num_chunks-1 equal-sized chunks (handles the remainder) */
            memset(buff + chunkoffset, 0,
                   (size - chunksize*(num_chunks - 1)) * sizeof(double));
        }
        else {
            memset(buff + chunkoffset, 0, chunksize * sizeof(double));
        }
    }
}

/* ========================================================================== */
/* === gpu_init ============================================================= */
/* ========================================================================== */

/*
 * Performs required initialization for GPU computing.
 *
 * Returns 0 if there is an error, so the intended use is
 *
 *     useGPU = CHOLMOD(gpu_init)
 *
 * which would locally turn off gpu processing if the initialization failed.
 */

int TEMPLATE2 (CHOLMOD (gpu_init))
(
    void *Cwork,                /* CPU workspace (used only for diagnostics) */
    cholmod_factor *L,
    cholmod_common *Common,
    Int nsuper,                 /* NOTE(review): unused in this function */
    Int n,
    Int nls,                    /* length of L->s copied to the device */
    cholmod_gpu_pointers *gpu_p /* out: device/pinned-host buffer pointers */
)
{
    /* NOTE(review): cublasError, maxBytesSize and HostPinnedSize are
     * declared but never used here; maxSize is set but not read. */
    Int i, k, maxSize ;
    cublasStatus_t cublasError ;
    cudaError_t cudaErr ;
    size_t maxBytesSize, HostPinnedSize ;

    /* Enable floating-point exception trapping (platform-specific). */
#ifdef _WIN32
    _clearfp();
    _controlfp(_controlfp(0, 0) & ~(_EM_INVALID | _EM_ZERODIVIDE |
                                    _EM_OVERFLOW), _MCW_EM);
#else
    feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
#endif

    maxSize = L->maxcsize;

    /* #define PAGE_SIZE (4*1024) */
    CHOLMOD_GPU_PRINTF (("gpu_init : %p\n",
        (void *) ((size_t) Cwork & ~(4*1024-1)))) ;

    /* make sure the assumed buffer sizes are large enough */
    if ( (nls+2*n+4)*sizeof(Int) > Common->devBuffSize ) {
        ERROR (CHOLMOD_GPU_PROBLEM,"\n\n"
               "GPU Memory allocation error.  Ls, Map and RelativeMap exceed\n"
               "devBuffSize.  It is not clear if this is due to insufficient\n"
               "device or host memory or both.  You can try:\n"
               "     1) increasing the amount of GPU memory requested\n"
               "     2) reducing CHOLMOD_NUM_HOST_BUFFERS\n"
               "     3) using a GPU & host with more memory\n"
               "This issue is a known limitation and should be fixed in a \n"
               "future release of CHOLMOD.\n") ;
        return (0) ;
    }

    /* divvy up the memory in dev_mempool: six devBuffSize-sized slices
     * (two Lx staging buffers, C, two assembly buffers A, then Ls/Map/
     * RelativeMap packed into the last slice). */
    gpu_p->d_Lx[0] = Common->dev_mempool;
    gpu_p->d_Lx[1] = (char*)Common->dev_mempool + Common->devBuffSize;
    gpu_p->d_C = (char*)Common->dev_mempool + 2*Common->devBuffSize;
    gpu_p->d_A[0] = (char*)Common->dev_mempool + 3*Common->devBuffSize;
    gpu_p->d_A[1] = (char*)Common->dev_mempool + 4*Common->devBuffSize;
    gpu_p->d_Ls = (char*)Common->dev_mempool + 5*Common->devBuffSize;
    gpu_p->d_Map = (char*)gpu_p->d_Ls + (nls+1)*sizeof(Int) ;
    gpu_p->d_RelativeMap = (char*)gpu_p->d_Map + (n+1)*sizeof(Int) ;

    /* Copy all of the Ls and Lpi data to the device.  If any supernodes are
     * to be computed on the device then this will be needed, so might as
     * well do it now.   */
    cudaErr = cudaMemcpy ( gpu_p->d_Ls, L->s, nls*sizeof(Int),
                           cudaMemcpyHostToDevice );
    CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ls)");

    /* Streams/events are created once and cached in Common; a non-NULL
     * gpuStream[0] means a previous call already created them. */
    if (!(Common->gpuStream[0])) {

        /* ------------------------------------------------------------------ */
        /* create each CUDA stream */
        /* ------------------------------------------------------------------ */

        for ( i=0; i<CHOLMOD_HOST_SUPERNODE_BUFFERS; i++ ) {
            cudaErr = cudaStreamCreate ( &(Common->gpuStream[i]) );
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA stream") ;
                return (0) ;
            }
        }

        /* ------------------------------------------------------------------ */
        /* create each CUDA event */
        /* ------------------------------------------------------------------ */

        for (i = 0 ; i < 3 ; i++) {
            cudaErr = cudaEventCreateWithFlags
                (&(Common->cublasEventPotrf [i]), cudaEventDisableTiming) ;
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
                return (0) ;
            }
        }

        for (i = 0 ; i < CHOLMOD_HOST_SUPERNODE_BUFFERS ; i++) {
            cudaErr = cudaEventCreateWithFlags
                (&(Common->updateCBuffersFree[i]), cudaEventDisableTiming) ;
            if (cudaErr != cudaSuccess) {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
                return (0) ;
            }
        }

        cudaErr = cudaEventCreateWithFlags ( &(Common->updateCKernelsComplete),
                                             cudaEventDisableTiming );
        if (cudaErr != cudaSuccess) {
            ERROR (CHOLMOD_GPU_PROBLEM, "CUDA updateCKernelsComplete event") ;
            return (0) ;
        }
    }

    /* carve the pinned host pool into CHOLMOD_HOST_SUPERNODE_BUFFERS
     * staging buffers of devBuffSize bytes each */
    gpu_p->h_Lx[0] = (double*)(Common->host_pinned_mempool);
    for ( k=1; k<CHOLMOD_HOST_SUPERNODE_BUFFERS; k++ ) {
        gpu_p->h_Lx[k] = (double*)((char *)(Common->host_pinned_mempool) +
                                   k*Common->devBuffSize);
    }

    return (1);  /* initialization successfull, useGPU = 1 */
}

/* ========================================================================== */
/* === gpu_reorder_descendants ============================================== */
/* ========================================================================== */

/* Reorder the descendant supernodes as:
 *    1st - descendant supernodes eligible for processing on the GPU
 *          in increasing (by flops) order
 *    2nd - supernodes whose processing is to remain on the CPU
 *          in any order
 *
 * All of the GPU-eligible supernodes will be scheduled first.  All
 * CPU-eligible descendants will overlap with the last (largest)
 * CHOLMOD_HOST_SUPERNODE_BUFFERS GPU-eligible descendants.
 */

/* comparator signature expected by qsort (glibc's __compar_fn_t) */
typedef int(*__compar_fn_t) (const void *, const void *);

void TEMPLATE2 (CHOLMOD (gpu_reorder_descendants))
(
    cholmod_common *Common,
    Int *Super,             /* Super[d]..Super[d+1]-1 = columns of supernode d */
    Int *locals,            /* index into Head for the current supernode list */
    Int *Lpi,
    Int *Lpos,
    Int *Head,              /* linked-list heads (modified in place) */
    Int *Next,              /* forward links (modified in place) */
    Int *Previous,          /* out: backward links, built at the end */
    Int *ndescendants,      /* out: number of descendants in the list */
    Int *tail,              /* out: last descendant in the reordered list */
    Int *mapCreatedOnGpu,   /* out: reset to 0 for the new supernode */
    cholmod_gpu_pointers *gpu_p
)
{
    Int prevd, nextd, firstcpu, d, k, kd1, kd2, ndcol, pdi, pdend, pdi1;
    Int dnext, ndrow2, p;
    Int n_descendant = 0;
    double score;

    /* use h_Lx[0] to buffer the GPU-eligible descendants */
    struct cholmod_descendant_score_t* scores =
        (struct cholmod_descendant_score_t*) gpu_p->h_Lx[0];

    /* NOTE(review): cpuref and prevd are set but never used below */
    double cpuref = 0.0;

    int nreverse = 1;
    int previousd;

    d = Head[*locals];
    prevd = -1;
    firstcpu = -1;
    *mapCreatedOnGpu = 0;

    /* Pass 1: score every descendant.  GPU-eligible ones get a large
     * devBuffSize boost so they sort after (above) all CPU-only ones. */
    while ( d != EMPTY )
    {

        /* Get the parameters for the current descendant supernode */
        kd1 = Super [d] ;       /* d contains cols kd1 to kd2-1 of L */
        kd2 = Super [d+1] ;
        ndcol = kd2 - kd1 ;     /* # of columns in all of d */
        pdi = Lpi [d] ;         /* pointer to first row of d in Ls */
        pdend = Lpi [d+1] ;     /* pointer just past last row of d in Ls */
        p = Lpos [d] ;          /* offset of 1st row of d affecting s */
        pdi1 = pdi + p ;        /* ptr to 1st row of d affecting s in Ls */
        ndrow2 = pdend - pdi1;

        nextd = Next[d];

        /* compute a rough flops 'score' for this descendant supernode */
        score = ndrow2 * ndcol;
        if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
             ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
            score += Common->devBuffSize;
        }

        /* place in sort buffer */
        scores[n_descendant].score = score;
        scores[n_descendant].d = d;
        n_descendant++;

        d = nextd;

    }

    /* Sort the GPU-eligible supernodes */
    qsort ( scores, n_descendant, sizeof(struct cholmod_descendant_score_t),
            (__compar_fn_t) CHOLMOD(score_comp) );

    /* Place sorted data back in descendant supernode linked list*/
    if ( n_descendant > 0 ) {
        Head[*locals] = scores[0].d;
        if ( n_descendant > 1 ) {
            /* relink Next pointers in sorted order; iterations are
             * independent so the loop can run in parallel */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    if (n_descendant > 64)
            for ( k=1; k<n_descendant; k++ ) {
                Next[scores[k-1].d] = scores[k].d;
            }
        }
        /* firstcpu is still -1 (== EMPTY) here: terminate the list */
        Next[scores[n_descendant-1].d] = firstcpu;
    }

    /* reverse the first CHOLMOD_HOST_SUPERNODE_BUFFERS to better hide PCIe
       communications */
    if ( Head[*locals] != EMPTY && Next[Head[*locals]] != EMPTY ) {
        previousd = Head[*locals];
        d = Next[Head[*locals]];
        while ( d!=EMPTY && nreverse < CHOLMOD_HOST_SUPERNODE_BUFFERS ) {

            kd1 = Super [d] ;    /* d contains cols kd1 to kd2-1 of L */
            kd2 = Super [d+1] ;
            ndcol = kd2 - kd1 ;  /* # of columns in all of d */
            pdi = Lpi [d] ;      /* pointer to first row of d in Ls */
            pdend = Lpi [d+1] ;  /* pointer just past last row of d in Ls */
            p = Lpos [d] ;       /* offset of 1st row of d affecting s */
            pdi1 = pdi + p ;     /* ptr to 1st row of d affecting s in Ls */
            ndrow2 = pdend - pdi1;

            nextd = Next[d];

            nreverse++;

            if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
                 ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
                /* place this supernode at the front of the list */
                Next[previousd] = Next[d];
                Next[d] = Head[*locals];
                Head[*locals] = d;
            }
            else {
                previousd = d;
            }
            d = nextd;
        }
    }

    /* create a 'previous' list so we can traverse backwards */
    *ndescendants = 0;
    if ( Head[*locals] != EMPTY ) {
        Previous[Head[*locals]] = EMPTY;
        for (d = Head [*locals] ; d != EMPTY ; d = dnext) {
            (*ndescendants)++;
            dnext = Next[d];
            if ( dnext != EMPTY ) {
                Previous[dnext] = d;
            }
            else {
                *tail = d;
            }
        }
    }

    return;

}

/* ========================================================================== */
/* === gpu_initialize_supernode ============================================= */
/* ========================================================================== */

/* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1 */

/* Zero the device assembly buffer d_A[0] for a new supernode and build the
 * row Map on the device. */
void TEMPLATE2 (CHOLMOD (gpu_initialize_supernode))
(
    cholmod_common *Common,
    Int nscol,      /* # columns of the supernode */
    Int nsrow,      /* # rows of the supernode */
    Int psi,        /* offset of the supernode's rows in Ls */
    cholmod_gpu_pointers *gpu_p
)
{

    cudaError_t cuErr;

    /* initialize the device supernode assemby memory to zero */
    cuErr = cudaMemset ( gpu_p->d_A[0], 0, nscol*nsrow*L_ENTRY*sizeof(double) );
    CHOLMOD_HANDLE_CUDA_ERROR(cuErr,"cudaMemset(d_A)");

    /* Create the Map on the device */
    createMapOnDevice ( (Int *)(gpu_p->d_Map),
                        (Int *)(gpu_p->d_Ls), psi, nsrow );

    return;

}

/* ========================================================================== */
/* === gpu_updateC ========================================================== */
/* ========================================================================== */

/* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1
 * refers to all of the rows in L, but many of the rows are all zero.
 * Supernode d holds columns kd1 to kd2-1 of L.  Nonzero rows in the range
 * k1:k2-1 are in the list Ls [pdi1 ... pdi2-1], of size ndrow1.  Nonzero rows
 * in the range k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2.
 * Let L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let L2 = L (Ls [pdi2 ...
 * pdend], kd1:kd2-1).  C is ndrow2-by-ndrow1.  Let C1 be the first ndrow1
 * rows of C and let C2 be the last ndrow2-ndrow1 rows of C.  Only the lower
 * triangular part of C1 needs to be computed since C1 is symmetric.
 *
 * UpdateC is completely asynchronous w.r.t. the GPU.  Once the input buffer
 * d_Lx[] has been filled, all of the device operations are issues, and the
 * host can continue with filling the next input buffer / or start processing
 * all of the descendant supernodes which are not eligible for processing on
 * the device (since they are too small - will not fill the device).
 */

int TEMPLATE2 (CHOLMOD (gpu_updateC))
(
    Int ndrow1,     /* C is ndrow2-by-ndrow2 */
    Int ndrow2,
    Int ndrow,      /* leading dimension of Lx */
    Int ndcol,      /* L1 is ndrow1-by-ndcol */
    Int nsrow,
    Int pdx1,       /* L1 starts at Lx + L_ENTRY*pdx1 */
                    /* L2 starts at Lx + L_ENTRY*(pdx1 + ndrow1) */
    Int pdi1,
    double *Lx,
    double *C,
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrLx, *devPtrC ;
    double alpha, beta ;
    cublasStatus_t cublasStatus ;
    cudaError_t cudaStat [2] ;
    Int ndrow3 ;
    int icol, irow;
    int iHostBuff, iDevBuff ;

#ifndef NTIMER
    double tstart = 0;
#endif

    if ((ndrow2*L_ENTRY < CHOLMOD_ND_ROW_LIMIT) ||
        (ndcol*L_ENTRY < CHOLMOD_ND_COL_LIMIT))
    {
        /* too small for the CUDA BLAS; use the CPU instead */
        return (0) ;
    }

    ndrow3 = ndrow2 - ndrow1 ;   /* # rows of the rectangular block C2 */

#ifndef NTIMER
    Common->syrkStart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_SYRK_CALLS++ ;
#endif

    /* ---------------------------------------------------------------------- */
    /* allocate workspace on the GPU */
    /* ---------------------------------------------------------------------- */

    iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
    iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

    /* cycle the device Lx buffer, d_Lx, through CHOLMOD_DEVICE_STREAMS,
       usually 2, so we can overlap the copy of this descendent supernode
       with the compute of the previous descendant supernode */
    devPtrLx = (double *)(gpu_p->d_Lx[iDevBuff]);

    /* very little overlap between kernels for difference descendant supernodes
       (since we enforce the supernodes must be large enough to fill the
       device) so we only need one C buffer */
    devPtrC = (double *)(gpu_p->d_C);

    /* ---------------------------------------------------------------------- */
    /* copy Lx to the GPU */
    /* ---------------------------------------------------------------------- */

    /* copy host data to pinned buffer first for better H2D bandwidth */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndcol > 32)
    for ( icol=0; icol<ndcol; icol++ ) {
        for ( irow=0; irow<ndrow2*L_ENTRY; irow++ ) {
            /* pack the ndrow2-by-ndcol block contiguously (stride ndrow2
             * instead of the original leading dimension ndrow) */
            gpu_p->h_Lx[iHostBuff][icol*ndrow2*L_ENTRY+irow] =
                Lx[pdx1*L_ENTRY+icol*ndrow*L_ENTRY + irow];
        }
    }

    cudaStat[0] = cudaMemcpyAsync ( devPtrLx,
        gpu_p->h_Lx[iHostBuff],
        ndrow2*ndcol*L_ENTRY*sizeof(devPtrLx[0]),
        cudaMemcpyHostToDevice,
        Common->gpuStream[iDevBuff] );

    if ( cudaStat[0] ) {
        CHOLMOD_GPU_PRINTF ((" ERROR cudaMemcpyAsync = %d \n", cudaStat[0]));
        return (0);
    }

    /* make the current stream wait for kernels in previous streams */
    cudaStreamWaitEvent ( Common->gpuStream[iDevBuff],
                          Common->updateCKernelsComplete, 0 ) ;

    /* ---------------------------------------------------------------------- */
    /* create the relative map for this descendant supernode */
    /* ---------------------------------------------------------------------- */

    createRelativeMapOnDevice ( (Int *)(gpu_p->d_Map),
                                (Int *)(gpu_p->d_Ls),
                                (Int *)(gpu_p->d_RelativeMap),
                                pdi1, ndrow2,
                                &(Common->gpuStream[iDevBuff]) );

    /* ---------------------------------------------------------------------- */
    /* do the CUDA SYRK */
    /* ---------------------------------------------------------------------- */

    cublasStatus = cublasSetStream (Common->cublasHandle,
                                    Common->gpuStream[iDevBuff]) ;
    if (cublasStatus != CUBLAS_STATUS_SUCCESS)
    {
        ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
    }
    alpha  = 1.0 ;
    beta   = 0.0 ;

#ifdef REAL
    cublasStatus = cublasDsyrk (Common->cublasHandle,
        CUBLAS_FILL_MODE_LOWER,
        CUBLAS_OP_N,
        (int) ndrow1, (int) ndcol,    /* N, K: L1 is ndrow1-by-ndcol */
        &alpha,                       /* ALPHA:  1 */
        devPtrLx,
        ndrow2,                       /* A, LDA: L1, ndrow2 */
        &beta,                        /* BETA:   0 */
        devPtrC,
        ndrow2) ;                     /* C, LDC: C1 */
#else
    cublasStatus = cublasZherk (Common->cublasHandle,
        CUBLAS_FILL_MODE_LOWER,
        CUBLAS_OP_N,
        (int) ndrow1, (int) ndcol,    /* N, K: L1 is ndrow1-by-ndcol*/
        &alpha,                       /* ALPHA:  1 */
        (const cuDoubleComplex *) devPtrLx,
        ndrow2,                       /* A, LDA: L1, ndrow2 */
        &beta,                        /* BETA:   0 */
        (cuDoubleComplex *) devPtrC,
        ndrow2) ;                     /* C, LDC: C1 */
#endif

    if (cublasStatus != CUBLAS_STATUS_SUCCESS)
    {
        ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
    }

#ifndef NTIMER
    Common->CHOLMOD_GPU_SYRK_TIME += SuiteSparse_time() - Common->syrkStart;
#endif

    /* ---------------------------------------------------------------------- */
    /* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C, C2 = L2*L1' */
    /* ---------------------------------------------------------------------- */

#ifndef NTIMER
    Common->CHOLMOD_GPU_GEMM_CALLS++ ;
    tstart = SuiteSparse_time();
#endif

    if (ndrow3 > 0)
    {
#ifndef REAL
        cuDoubleComplex calpha  = {1.0,0.0} ;
        cuDoubleComplex cbeta   = {0.0,0.0} ;
#endif

        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dgemm */
        /* ------------------------------------------------------------------ */

#ifdef REAL
        alpha  = 1.0 ;
        beta   = 0.0 ;
        cublasStatus = cublasDgemm (Common->cublasHandle,
            CUBLAS_OP_N, CUBLAS_OP_T,
            ndrow3, ndrow1, ndcol,          /* M, N, K */
            &alpha,                         /* ALPHA:  1 */
            devPtrLx + L_ENTRY*(ndrow1),    /* A, LDA: L2*/
            ndrow2,                         /* ndrow */
            devPtrLx,                       /* B, LDB: L1 */
            ndrow2,                         /* ndrow */
            &beta,                          /* BETA:   0 */
            devPtrC + L_ENTRY*ndrow1,       /* C, LDC: C2 */
            ndrow2) ;
#else
        cublasStatus = cublasZgemm (Common->cublasHandle,
            CUBLAS_OP_N, CUBLAS_OP_C,
            ndrow3, ndrow1, ndcol,          /* M, N, K */
            &calpha,                        /* ALPHA:  1 */
            (const cuDoubleComplex*) devPtrLx + ndrow1,
            ndrow2,                         /* ndrow */
            (const cuDoubleComplex *) devPtrLx,
            ndrow2,                         /* ndrow */
            &cbeta,                         /* BETA:   0 */
            (cuDoubleComplex *)devPtrC + ndrow1,
            ndrow2) ;
#endif

        if (cublasStatus != CUBLAS_STATUS_SUCCESS)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }

    }

#ifndef NTIMER
    Common->CHOLMOD_GPU_GEMM_TIME += SuiteSparse_time() - tstart;
#endif

    /* ------------------------------------------------------------------ */
    /* Assemble the update C on the device using the d_RelativeMap */
    /* ------------------------------------------------------------------ */

#ifdef REAL
    addUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
        gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
        &(Common->gpuStream[iDevBuff]) );
#else
    addComplexUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
        gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
        &(Common->gpuStream[iDevBuff]) );
#endif

    /* Record an event indicating that kernels for this descendant are
     * complete */
    cudaEventRecord ( Common->updateCKernelsComplete,
                      Common->gpuStream[iDevBuff]);
    cudaEventRecord ( Common->updateCBuffersFree[iHostBuff],
                      Common->gpuStream[iDevBuff]);

    return (1) ;
}

/* ========================================================================== */
/* === gpu_final_assembly =================================================== */
/* ========================================================================== */

/* If the supernode was assembled on both the CPU and the GPU, this will
 * complete the supernode assembly on both the GPU and CPU.
 */

void TEMPLATE2 (CHOLMOD (gpu_final_assembly))
(
    cholmod_common *Common,
    double *Lx,
    Int psx,
    Int nscol,
    Int nsrow,
    int supernodeUsedGPU,
    int *iHostBuff,     /* out: pinned-host buffer index used here */
    int *iDevBuff,      /* out: device stream index used here */
    cholmod_gpu_pointers *gpu_p
)
{
    Int iidx, i, j;
    Int iHostBuff2 ;
    Int iDevBuff2 ;

    if ( supernodeUsedGPU ) {

        /* ------------------------------------------------------------------ */
        /* Apply all of the Shur-complement updates, computed on the gpu, to */
        /* the supernode. */
        /* ------------------------------------------------------------------ */

        *iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
        *iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

        if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {

            /* If this supernode is going to be factored using the GPU (potrf)
             * then it will need the portion of the update assembled ont the
             * CPU.  So copy that to a pinned buffer an H2D copy to device. */

            /* wait until a buffer is free */
            cudaEventSynchronize ( Common->updateCBuffersFree[*iHostBuff] );

            /* copy update assembled on CPU to a pinned buffer */

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    private(iidx) if (nscol>32)

            for ( j=0; j<nscol; j++ ) {
                for ( i=j; i<nsrow*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    gpu_p->h_Lx[*iHostBuff][iidx] = Lx[psx*L_ENTRY+iidx];
                }
            }

            /* H2D transfer of update assembled on CPU */
            cudaMemcpyAsync ( gpu_p->d_A[1], gpu_p->h_Lx[*iHostBuff],
                              nscol*nsrow*L_ENTRY*sizeof(double),
                              cudaMemcpyHostToDevice,
                              Common->gpuStream[*iDevBuff] );

        }

        Common->ibuffer++;

        iHostBuff2 = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
        iDevBuff2 = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;

        /* wait for all kernels to complete */
        cudaEventSynchronize( Common->updateCKernelsComplete );

        /* copy assembled Schur-complement updates computed on GPU */
        cudaMemcpyAsync ( gpu_p->h_Lx[iHostBuff2], gpu_p->d_A[0],
                          nscol*nsrow*L_ENTRY*sizeof(double),
                          cudaMemcpyDeviceToHost,
                          Common->gpuStream[iDevBuff2] );

        if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {

            /* with the current implementation, potrf still uses data from the
             * CPU - so put the fully assembled supernode in a pinned buffer
             * for fastest access */

            /* need both H2D and D2H copies to be complete */
            cudaDeviceSynchronize();

            /* sum updates from cpu and device on device */
#ifdef REAL
            sumAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol );
#else
            sumComplexAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0,
                                  nsrow, nscol );
#endif

            /* place final assembled supernode in pinned buffer */

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    private(iidx) if (nscol>32)

            for ( j=0; j<nscol; j++ ) {
                for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    /* subtract the GPU-assembled update from the CPU part */
                    gpu_p->h_Lx[*iHostBuff][iidx] -=
                        gpu_p->h_Lx[iHostBuff2][iidx];
                }
            }

        }
        else
        {

            /* assemble with CPU updates */
            cudaDeviceSynchronize();

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    private(iidx) if (nscol>32)

            for ( j=0; j<nscol; j++ ) {
                for ( i=j*L_ENTRY; i<nsrow*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] -= gpu_p->h_Lx[iHostBuff2][iidx];
                }
            }
        }
    }
    return;
}

/* ========================================================================== */
/* === gpu_lower_potrf ====================================================== */
/* ========================================================================== */

/* Cholesky factorzation (dpotrf) of a matrix S, operating on the lower
 * triangular part only.   S is nscol2-by-nscol2 with leading dimension nsrow.
 *
 * S is the top part of the supernode (the lower triangular matrx).
 * This function also copies the bottom rectangular part of the supernode (B)
 * onto the GPU, in preparation for gpu_triangular_solve.
 */

/*
 * On entry, d_A[1] contains the fully assembled supernode
 */

int TEMPLATE2 (CHOLMOD (gpu_lower_potrf))
(
    Int nscol2,     /* S is nscol2-by-nscol2 */
    Int nsrow,      /* leading dimension of S */
    Int psx,        /* S is located at Lx + L_ENTRY*psx */
    double *Lx,     /* contains S; overwritten with Cholesky factor */
    Int *info,      /* BLAS info return value */
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrA, *devPtrB, *A ;
    double alpha, beta ;
    cudaError_t cudaStat ;
    cublasStatus_t cublasStatus ;
    Int j, nsrow2, nb, n, gpu_lda, lda, gpu_ldb ;
    int ilda, ijb, iinfo ;
#ifndef NTIMER
    double tstart ;
#endif

    if (nscol2 * L_ENTRY < CHOLMOD_POTRF_LIMIT)
    {
        /* too small for the CUDA BLAS; use the CPU instead */
        return (0) ;
    }

#ifndef NTIMER
    tstart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_POTRF_CALLS++ ;
#endif

    nsrow2 = nsrow - nscol2 ;

    /* ---------------------------------------------------------------------- */
    /* heuristic to get the block size depending of the problem size */
    /* ---------------------------------------------------------------------- */

    nb = 128 ;
    if (nscol2 > 4096) nb = 256 ;
    if (nscol2 > 8192) nb = 384 ;
    n  = nscol2 ;
    /* pad the device leading dimension up to a multiple of 32 */
    gpu_lda = ((nscol2+31)/32)*32 ;
    lda = nsrow ;

    /* pinned host staging buffer for the diagonal block factored on the CPU;
     * uses the buffer filled by gpu_final_assembly (ibuffer-1) */
    A = gpu_p->h_Lx[(Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1)%
                    CHOLMOD_HOST_SUPERNODE_BUFFERS];

    /* ---------------------------------------------------------------------- */
    /* determine the GPU leading dimension of B */
    /* ---------------------------------------------------------------------- */

    gpu_ldb = 0 ;
    if (nsrow2 > 0)
    {
        gpu_ldb = ((nsrow2+31)/32)*32 ;
    }

    /* ---------------------------------------------------------------------- */
    /* remember where device memory is, to be used by triangular solve later */
    /* ---------------------------------------------------------------------- */

    devPtrA = gpu_p->d_Lx[0];
    devPtrB = gpu_p->d_Lx[1];

    /* ---------------------------------------------------------------------- */
    /* copy A from device to device */
    /* ---------------------------------------------------------------------- */

    cudaStat = cudaMemcpy2DAsync ( devPtrA,
       gpu_lda * L_ENTRY * sizeof (devPtrA[0]),
       gpu_p->d_A[1],
       nsrow * L_ENTRY * sizeof (Lx[0]),
       nscol2 * L_ENTRY * sizeof (devPtrA[0]),
       nscol2,
       cudaMemcpyDeviceToDevice,
       Common->gpuStream[0] );

    if ( cudaStat ) {
        ERROR ( CHOLMOD_GPU_PROBLEM, "GPU memcopy device to device");
    }

    /* ---------------------------------------------------------------------- */
    /* copy B in advance, for gpu_triangular_solve */
    /* ---------------------------------------------------------------------- */

    if (nsrow2 > 0)
    {
        cudaStat = cudaMemcpy2DAsync (devPtrB,
            gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
            gpu_p->d_A[1] + L_ENTRY*nscol2,
            nsrow * L_ENTRY * sizeof (Lx [0]),
            nsrow2 * L_ENTRY * sizeof (devPtrB [0]),
            nscol2,
            cudaMemcpyDeviceToDevice,
            Common->gpuStream[0]) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
        }
    }

    /* ------------------------------------------------------------------ */
    /* define the dpotrf stream */
    /* ------------------------------------------------------------------ */

    cublasStatus = cublasSetStream (Common->cublasHandle,
                                    Common->gpuStream [0]) ;
    if (cublasStatus != CUBLAS_STATUS_SUCCESS)
    {
        ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
    }

    /* ---------------------------------------------------------------------- */
    /* block Cholesky factorization of S */
    /* ---------------------------------------------------------------------- */

    for (j = 0 ; j < n ; j += nb)
    {
        Int jb = nb < (n-j) ? nb : (n-j) ;

        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dsyrk */
        /* ------------------------------------------------------------------ */

        alpha = -1.0 ;
        beta  = 1.0 ;
#ifdef REAL
        cublasStatus = cublasDsyrk (Common->cublasHandle,
            CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j,
            &alpha, devPtrA + j, gpu_lda,
            &beta,  devPtrA + j + j*gpu_lda, gpu_lda) ;
#else
        cublasStatus = cublasZherk (Common->cublasHandle,
            CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j,
            &alpha, (cuDoubleComplex*)devPtrA + j, gpu_lda,
            &beta,  (cuDoubleComplex*)devPtrA + j + j*gpu_lda, gpu_lda) ;
#endif

        if (cublasStatus != CUBLAS_STATUS_SUCCESS)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }

        /* ------------------------------------------------------------------ */

        cudaStat = cudaEventRecord (Common->cublasEventPotrf [0],
                                    Common->gpuStream [0]) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
        }

        cudaStat = cudaStreamWaitEvent (Common->gpuStream [1],
                                        Common->cublasEventPotrf [0], 0) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
        }

        /* ------------------------------------------------------------------ */
        /* copy back the jb columns on two different streams */
        /* ------------------------------------------------------------------ */

        cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + j*lda),
            lda * L_ENTRY * sizeof (double),
            devPtrA + L_ENTRY*(j + j*gpu_lda),
            gpu_lda * L_ENTRY * sizeof (double),
            L_ENTRY * sizeof (double)*jb,
            jb,
            cudaMemcpyDeviceToHost,
            Common->gpuStream [1]) ;

        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
        }

        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dgemm */
        /* ------------------------------------------------------------------ */

        if ((j+jb) < n)
        {
#ifdef REAL
            alpha = -1.0 ;
            beta  = 1.0 ;
            cublasStatus = cublasDgemm (Common->cublasHandle,
                CUBLAS_OP_N, CUBLAS_OP_T,
                (n-j-jb), jb, j,
                &alpha,
                devPtrA + (j+jb), gpu_lda,
                devPtrA + (j)  , gpu_lda,
                &beta,
                devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#else
            cuDoubleComplex calpha = {-1.0,0.0} ;
            cuDoubleComplex cbeta  = { 1.0,0.0} ;
            cublasStatus = cublasZgemm (Common->cublasHandle,
                CUBLAS_OP_N, CUBLAS_OP_C,
                (n-j-jb), jb, j,
                &calpha,
                (cuDoubleComplex*)devPtrA + (j+jb), gpu_lda,
                (cuDoubleComplex*)devPtrA + (j), gpu_lda,
                &cbeta,
                (cuDoubleComplex*)devPtrA + (j+jb + j*gpu_lda), gpu_lda ) ;
#endif

            if (cublasStatus != CUBLAS_STATUS_SUCCESS)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
            }
        }

        cudaStat = cudaStreamSynchronize (Common->gpuStream [1]) ;
        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
        }

        /* ------------------------------------------------------------------ */
        /* compute the Cholesky factorization of the jbxjb block on the CPU */
        /* ------------------------------------------------------------------ */

        ilda = (int) lda ;
        ijb  = jb ;
#ifdef REAL
        LAPACK_DPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ;
#else
        LAPACK_ZPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ;
#endif
        *info = iinfo ;

        if (*info != 0)
        {
            /* report the global column index of the failing pivot */
            *info = *info + j ;
            break ;
        }

        /* ------------------------------------------------------------------ */
        /* copy the result back to the GPU */
        /* ------------------------------------------------------------------ */

        cudaStat = cudaMemcpy2DAsync (devPtrA + L_ENTRY*(j + j*gpu_lda),
            gpu_lda * L_ENTRY * sizeof (double),
            A + L_ENTRY * (j + j*lda),
            lda * L_ENTRY * sizeof (double),
            L_ENTRY * sizeof (double) * jb,
            jb,
            cudaMemcpyHostToDevice,
            Common->gpuStream [0]) ;

        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
        }

        /* ------------------------------------------------------------------ */
        /* do the CUDA BLAS dtrsm */
        /* ------------------------------------------------------------------ */

        if ((j+jb) < n)
        {
#ifdef REAL
            alpha = 1.0 ;
            cublasStatus = cublasDtrsm (Common->cublasHandle,
                CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER,
                CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT,
                (n-j-jb), jb,
                &alpha,
                devPtrA + (j + j*gpu_lda), gpu_lda,
                devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#else
            cuDoubleComplex calpha = {1.0,0.0};
            cublasStatus = cublasZtrsm (Common->cublasHandle,
                CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER,
                CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT,
                (n-j-jb), jb,
                &calpha,
                (cuDoubleComplex *)devPtrA + (j + j*gpu_lda), gpu_lda,
                (cuDoubleComplex *)devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#endif

            if (cublasStatus != CUBLAS_STATUS_SUCCESS)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
            }

            /* -------------------------------------------------------------- */
            /* Copy factored column back to host.                             */
            /* -------------------------------------------------------------- */

            cudaStat = cudaEventRecord (Common->cublasEventPotrf[2],
                                        Common->gpuStream[0]) ;
            if (cudaStat)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
            }

            cudaStat = cudaStreamWaitEvent (Common->gpuStream[1],
                                            Common->cublasEventPotrf[2], 0) ;
            if (cudaStat)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
            }

            cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + jb + j * lda),
                lda * L_ENTRY * sizeof (double),
                devPtrA + L_ENTRY* (j + jb + j * gpu_lda),
                gpu_lda * L_ENTRY * sizeof (double),
                L_ENTRY * sizeof (double)* (n - j - jb),
                jb,
                cudaMemcpyDeviceToHost,
                Common->gpuStream[1]) ;

            if (cudaStat)
            {
                ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
            }
        }
    }

#ifndef NTIMER
    Common->CHOLMOD_GPU_POTRF_TIME += SuiteSparse_time ( ) - tstart ;
#endif

    return (1) ;
}

/* ========================================================================== */
/* === gpu_triangular_solve ================================================= */
/* ========================================================================== */

/* The current supernode is
columns k1 to k2-1 of L.  Let L1 be the diagonal
 * block (factorized by dpotrf/zpotrf above; rows/cols k1:k2-1), and L2 be rows
 * k2:n-1 and columns k1:k2-1 of L.  The triangular system to solve is L2*L1' =
 * S2, where S2 is overwritten with L2.  More precisely, L2 = S2 / L1' in
 * MATLAB notation.
 */

/* Version with pre-allocation in POTRF */

int TEMPLATE2 (CHOLMOD (gpu_triangular_solve))
(
    Int nsrow2,     /* L1 and S2 are nsrow2-by-nscol2 */
    Int nscol2,     /* L1 is nscol2-by-nscol2 */
    Int nsrow,      /* leading dimension of L1, L2, and S2 */
    Int psx,        /* L1 is at Lx+L_ENTRY*psx;
                     * L2 at Lx+L_ENTRY*(psx+nscol2)*/
    double *Lx,     /* holds L1, L2, and S2 */
    cholmod_common *Common,
    cholmod_gpu_pointers *gpu_p
)
{
    double *devPtrA, *devPtrB ;
    cudaError_t cudaStat ;
    cublasStatus_t cublasStatus ;
    /* NOTE(review): gpu_rowstep is declared but never used in this routine */
    Int gpu_lda, gpu_ldb, gpu_rowstep ;

    Int gpu_row_start = 0 ;
    Int gpu_row_max_chunk, gpu_row_chunk;
    int ibuf = 0;
    int iblock = 0;
    /* pinned host buffer that receives the solved rows; picked from the
     * round-robin set of CHOLMOD_HOST_SUPERNODE_BUFFERS buffers */
    int iHostBuff = (Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1) %
        CHOLMOD_HOST_SUPERNODE_BUFFERS;
    int i, j;
    Int iidx;
    int iwrap;

#ifndef NTIMER
    double tstart ;
#endif

#ifdef REAL
    double alpha  = 1.0 ;
    gpu_row_max_chunk = 768;
#else
    cuDoubleComplex calpha  = {1.0,0.0} ;
    gpu_row_max_chunk = 256;
#endif

    /* nothing to solve if there are no rows below the diagonal block */
    if ( nsrow2 <= 0 )
    {
        return (0) ;
    }

#ifndef NTIMER
    tstart = SuiteSparse_time ( ) ;
    Common->CHOLMOD_GPU_TRSM_CALLS++ ;
#endif

    /* device leading dimensions, rounded up to a multiple of 32 */
    gpu_lda = ((nscol2+31)/32)*32 ;
    gpu_ldb = ((nsrow2+31)/32)*32 ;

    devPtrA = gpu_p->d_Lx[0];   /* L1 (nscol2-by-nscol2) on the device */
    devPtrB = gpu_p->d_Lx[1];   /* S2 rows (overwritten with L2) on the device */

    /* make sure the copy of B has completed */
    cudaStreamSynchronize( Common->gpuStream[0] );

    /* ---------------------------------------------------------------------- */
    /* do the CUDA BLAS dtrsm in chunks of at most gpu_row_max_chunk rows;    */
    /* each chunk is issued on its own stream (gpuStream[ibuf]) so the solve  */
    /* of one chunk overlaps the device-to-host copy of earlier chunks        */
    /* ---------------------------------------------------------------------- */

    while ( gpu_row_start < nsrow2 )
    {
        gpu_row_chunk = nsrow2 - gpu_row_start;
        if ( gpu_row_chunk > gpu_row_max_chunk ) {
            gpu_row_chunk = gpu_row_max_chunk;
        }

        cublasStatus = cublasSetStream ( Common->cublasHandle,
                                         Common->gpuStream[ibuf] );

        if ( cublasStatus != CUBLAS_STATUS_SUCCESS )
        {
            ERROR ( CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream");
        }

#ifdef REAL
        /* L2(chunk) = S2(chunk) / L1', transposed for the real case */
        cublasStatus = cublasDtrsm (Common->cublasHandle,
                                    CUBLAS_SIDE_RIGHT,
                                    CUBLAS_FILL_MODE_LOWER,
                                    CUBLAS_OP_T,
                                    CUBLAS_DIAG_NON_UNIT,
                                    gpu_row_chunk,
                                    nscol2,
                                    &alpha,
                                    devPtrA,
                                    gpu_lda,
                                    devPtrB + gpu_row_start,
                                    gpu_ldb) ;
#else
        /* complex case uses the conjugate transpose of L1 */
        cublasStatus = cublasZtrsm (Common->cublasHandle,
                                    CUBLAS_SIDE_RIGHT,
                                    CUBLAS_FILL_MODE_LOWER,
                                    CUBLAS_OP_C,
                                    CUBLAS_DIAG_NON_UNIT,
                                    gpu_row_chunk,
                                    nscol2,
                                    &calpha,
                                    (const cuDoubleComplex *) devPtrA,
                                    gpu_lda,
                                    (cuDoubleComplex *)devPtrB + gpu_row_start ,
                                    gpu_ldb) ;
#endif
        if (cublasStatus != CUBLAS_STATUS_SUCCESS)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
        }

        /* ------------------------------------------------------------------ */
        /* copy result back to the CPU (async, on this chunk's stream)        */
        /* ------------------------------------------------------------------ */

        cudaStat = cudaMemcpy2DAsync (
            gpu_p->h_Lx[iHostBuff] +
            L_ENTRY*(nscol2+gpu_row_start),
            nsrow * L_ENTRY * sizeof (Lx [0]),
            devPtrB + L_ENTRY*gpu_row_start,
            gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
            gpu_row_chunk * L_ENTRY *
            sizeof (devPtrB [0]),
            nscol2,
            cudaMemcpyDeviceToHost,
            Common->gpuStream[ibuf]);

        if (cudaStat)
        {
            ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
        }

        /* record an event so we can later wait for this chunk's copy to drain */
        cudaEventRecord ( Common->updateCBuffersFree[ibuf],
                          Common->gpuStream[ibuf] );

        gpu_row_start += gpu_row_chunk;
        ibuf++;
        ibuf = ibuf % CHOLMOD_HOST_SUPERNODE_BUFFERS;

        iblock ++;

        if ( iblock >= CHOLMOD_HOST_SUPERNODE_BUFFERS )
        {
            Int gpu_row_start2 ;
            Int gpu_row_end ;

            /* then CHOLMOD_HOST_SUPERNODE_BUFFERS worth of work has been
             * scheduled, so check for completed events and copy result into
             * Lx before continuing. */
            cudaEventSynchronize ( Common->updateCBuffersFree
                                   [iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );

            /* copy the oldest completed chunk from the host buffer into Lx */
            gpu_row_start2 = nscol2 +
                (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
                *gpu_row_max_chunk;

            gpu_row_end = gpu_row_start2+gpu_row_max_chunk;

            if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;

#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    private(iidx) if ( nscol2 > 32 )

            for ( j=0; j<nscol2; j++ ) {
                for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
                }
            }
        }
    }

    /* Convenient to copy the L1 block here */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    private ( iidx ) if ( nscol2 > 32 )

    for ( j=0; j<nscol2; j++ ) {
        for ( i=j*L_ENTRY; i<nscol2*L_ENTRY; i++ ) {
            iidx = j*nsrow*L_ENTRY + i;
            Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
        }
    }

    /* now account for the last HSTREAMS buffers (chunks still in flight) */
    for ( iwrap=0; iwrap<CHOLMOD_HOST_SUPERNODE_BUFFERS; iwrap++ )
    {
        int i, j;
        Int gpu_row_start2 = nscol2 +
            (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
            *gpu_row_max_chunk;
        if (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS >= 0 &&
            gpu_row_start2 < nsrow )
        {
            Int iidx;
            Int gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
            if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;

            cudaEventSynchronize ( Common->updateCBuffersFree
                                   [iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );

            /* copy into Lx */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    private(iidx) if ( nscol2 > 32 )

            for ( j=0; j<nscol2; j++ ) {
                for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
                    iidx = j*nsrow*L_ENTRY+i;
                    Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
                }
            }
        }
        iblock++;
    }

    /* ---------------------------------------------------------------------- */
    /* return */
    /* ---------------------------------------------------------------------- */

#ifndef NTIMER
    Common->CHOLMOD_GPU_TRSM_TIME += SuiteSparse_time ( ) - tstart ;
#endif

    return (1) ;
}

/*
========================================================================== */

/* === gpu_copy_supernode =================================================== */
/* ========================================================================== */

/* In the event gpu_triangular_solve is not needed / called, this routine
 * copies the factored diagonal block from the GPU to the CPU: the result is
 * read from the pinned host buffer h_Lx[iHostBuff] into the supernode's
 * storage in Lx, after waiting for all outstanding GPU work. */

void TEMPLATE2 (CHOLMOD (gpu_copy_supernode))
(
    cholmod_common *Common,
    double *Lx,             /* factor storage; supernode starts at psx */
    Int psx,                /* first entry of the supernode in Lx */
    Int nscol,              /* number of columns to copy */
    Int nscol2,             /* columns actually factorized on the GPU */
    Int nsrow,              /* leading dimension of the supernode */
    int supernodeUsedGPU,   /* nonzero if the GPU factorized this supernode */
    int iHostBuff,          /* index of the pinned host buffer with the result */
    cholmod_gpu_pointers *gpu_p
)
{
    Int iidx, i, j;
    if ( supernodeUsedGPU && nscol2 * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {
        /* drain all pending device work before reading the host buffer */
        cudaDeviceSynchronize();
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS)   \
    private(iidx,i,j) if (nscol>32)
        /* copy the lower-triangular part, column by column */
        for ( j=0; j<nscol; j++ ) {
            for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) {
                iidx = j*nsrow*L_ENTRY+i;
                Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
            }
        }
    }
    return;
}

#endif

#undef REAL
#undef COMPLEX
#undef ZOMPLEX
opencl_blockchain_fmt_plug.c
/* blockchain "My Wallet" cracker patch for JtR. Hacked together during June of
 * 2013 by Dhiru Kholia <dhiru at openwall.com>.
 *
 * See https://blockchain.info/wallet/wallet-format
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz at openwall.net>
 * and Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>, and it is
 * hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Improved detection, added iteration count and handle v2 hashes, Feb, 2015, JimF.
 */

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_blockchain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_blockchain);
#else

#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "aes.h"
#include "formats.h"
#include "common.h"
#include "jumbo.h"
#include "common-opencl.h"
#include "options.h"
#include "blockchain_common.h"

#define FORMAT_LABEL            "blockchain-opencl"
#define FORMAT_NAME             "blockchain My Wallet"
#define ALGORITHM_NAME          "PBKDF2-SHA1 OpenCL AES"
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define BINARY_SIZE             0
#define PLAINTEXT_LENGTH        64
#define SALT_SIZE               sizeof(struct custom_salt)
#define BINARY_ALIGN            MEM_ALIGN_WORD
#define SALT_ALIGN              4

/* increase me (in multiples of 16) to increase the decrypted and search area */
#define BIG_ENOUGH              (8192 * 32)
#define SAFETY_FACTOR           160

/* per-candidate input: password length plus up to 64 bytes of key material */
typedef struct {
	uint32_t length;
	uint8_t v[PLAINTEXT_LENGTH];
} blockchain_password;

/* per-candidate output: 32-byte derived key, stored as 32-bit words */
typedef struct {
	uint32_t v[32/4];
} blockchain_hash;

/* PBKDF2 parameters shared by all candidates of one salt */
typedef struct {
	uint32_t iterations;
	uint32_t outlen;
	uint32_t skip_bytes;
	uint8_t length;
	uint8_t salt[64];
} blockchain_salt;

static int *cracked;            /* per-index flags set by crypt_all */
static int any_cracked;         /* nonzero if any index cracked this batch */
static struct custom_salt *cur_salt;

static cl_int cl_error;
static blockchain_password *inbuffer;   /* host copy of kernel input */
static blockchain_hash *outbuffer;      /* host copy of kernel output */
static blockchain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;

size_t insize, outsize, settingsize, cracked_size;

#define STEP 0
#define SEED 256

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"

static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};

/* ------- Helper functions ------- */

/* Upper bound on the work-group size usable by the crypt kernel. */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

/* Allocate host and device buffers for gws candidates and bind the
 * device buffers as kernel arguments. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(blockchain_password) * gws;
	outsize = sizeof(blockchain_hash) * gws;
	settingsize = sizeof(blockchain_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize,
	                        NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY,
	                             settingsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize,
	                         NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in),
	               "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out),
	               "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
	               &mem_setting), "Error while setting mem_salt kernel argument");
}

/* Release device buffers and host mirrors created by create_clobj.
 * `cracked` doubles as the "buffers exist" flag. */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

/* Tear down kernel and program once autotuning has run. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

/* Build the PBKDF2-HMAC-SHA1 kernel (first call only) and autotune. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(blockchain_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}

/* Stage the 16-byte salt and fixed PBKDF2 parameters on the device. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->data, 16);
	currentsalt.length = 16;
	currentsalt.iterations = cur_salt->iter;
	currentsalt.outlen = 32;
	currentsalt.skip_bytes = 0;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy salt to gpu");
}

#undef set_key
/* Store one candidate password (truncated to PLAINTEXT_LENGTH) in the
 * host input buffer; copied to the device in crypt_all. */
static void set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

/* Return the candidate at index as a NUL-terminated string
 * (static buffer, overwritten on each call). */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}

/* Derive keys on the GPU, then test each derived key on the CPU with
 * blockchain_decrypt; sets cracked[] / any_cracked. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	    "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking, so results are ready below)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
	    "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (!blockchain_decrypt((unsigned char*)outbuffer[index].v,
		                        cur_salt->data)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}

	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_blockchain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		blockchain_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		blockchain_common_valid,
		fmt_default_split,
		fmt_default_binary,
		blockchain_common_get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
electrum_fmt_plug.c
/*
 * JtR format to crack password protected Electrum Wallets.
 *
 * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
 * is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Special thanks goes to Christopher Gurnee for making this work possible.
 */

#include "arch.h"

#if !AC_BUILT
#define HAVE_LIBZ 1
#endif

#if HAVE_LIBZ

#if FMT_EXTERNS_H
extern struct fmt_main fmt_electrum;
#elif FMT_REGISTERS_H
john_register_one(&fmt_electrum);
#else

#include <string.h>
#include <zlib.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               4
#endif
#endif
#include <openssl/bn.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes.h"
#include "sha2.h"
#include "jumbo.h"
#include "secp256k1.h"
#include "pbkdf2_hmac_sha512.h"
#include "hmac_sha.h"
#include "memdbg.h"

#define FORMAT_NAME             "Electrum Wallet"
#define FORMAT_LABEL            "electrum"
#define FORMAT_TAG              "$electrum$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME          "SHA256 AES / PBKDF2-SHA512 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "SHA256 AES / PBKDF2-SHA512 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define BINARY_SIZE             0
#define BINARY_ALIGN            1
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)
#define PLAINTEXT_LENGTH        125
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA512
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA512
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

/* self-test vectors: "$electrum$<type>*<fields...>", expected password */
static struct fmt_tests electrum_tests[] = {
	// Wallet created by Electrum 1.9.8
	{"$electrum$1*d64ac297de09893a03bc540b346d5e97*0af493539c512e3ded466b4879b8a47b", "openwall123"},
	// Electrum 2.8.0+ encrypted wallet
{"$electrum$4*03c2a94eb01e9453c24c9bf49102356788673cc26fbe27b9bf54b0f150758c7864*4249453103c2a94eb01e9453c24c9bf49102356788673cc26fbe27b9bf54b0f150758c7864355ed45b963901b56cd6c483468247c7c8c76ba11c9cb94633575838cffb8f0cebfc9af91ba402c06cca5c08238c643a0291e66e1a849eb66a9eda17e1496d09f46bfe6f63bfdcd591c260f31b92bd5958ce85c7719983a7395c88570946a59d5dcc2188680aba439cde0dbdfeaba985fe3d1a97d25b81573a92f72aea8c60fa3a4228acb789d7f307f6a19d1025fa6ac81d91d45ef07c0b26d9f85fc6ba07246b8b19d641929aac16ff1c942a3d69b824e3e39a122402aed63d3d12ca299416500459e7353bd56db92102c93f045ccc719cee90d2f891ff6b128886ec90768364bcc89c3393f21a5b57915f4eaf4e3b9c7a3958124b43956a47572ae38df2a11b84f6dc25ddc3d3b1968e3adadc756507118301e8cc490d249dc603f4f46c3bf0b214fd3bfb8dab6f048ba7d60dbee031d386a5aeec6664d2891abbeb0201b437d6e37c140be3e6210078e76afafbd78a8acaf45f21cf83c69218f9bfd3abb0211d57ab1874e9d645171cdaad4887a9fea86003b9948d22d9e7bfaec4c4bd0786cd4d191c82c61e83c61bae06a7c9936af46f8fa121ab696aba24ad8fd8f69537aa713bf271e4be567e7e3ccd141511c96ce634175f845ff680f71bbd595ef5d45d9cfd9a7e099fbab7964add7a76c4820b20952121e5621cb53c9476dc23860a5bc4ba3ecf636dc224503202dc11bf3bc88c70dcc2005684f7d3ebe6a7ea1487423a5145442f8f3d806d5d219560b4bce272ef9d6e32849b692cd91d4c60462b0f813603a52dc84b959051e787d890661e9f439a11fa8819c4fb947ff8dd0a5b7e5e63605f4e9f6eac6f8b2bfd7a9098dd2201c2f4cdaa2d7d0691ccf42b2761a8bb2a08c755077a753a41bcf305c83da8cd9ebaeee0360afb4be00827e167b2c1a3d5975d3a4a1e3b3b56794a155253437710ee3c0d0a2de0c4d631b48808fa946146f09e8ea9888d6c6bad104ebed814e79bdc26be38e8580d8fff6324405c128627079d1e3bafc2479274a3bc4f8196e923c835204e91ce8a9cb235c5349056415ad58a83b41254eda57839cd2e0bb66f125e32c76671f6447b2b0321d021c60706ff6f103ce483986fe0f1cc62307f6a1e89c4b2f334fc6f1f2597f5d68b3948c7655025a04ea858bc33eb341de09bdb4862701abcbc4c907270856de6072ee8d0c9e46e19c50eac454d4ca5fcd1a35f5d239aadc82543deafcd17f0eae2145561b8834dd80d337c574d3e931365db294d66aa4b47669f92784325b85abae49a8447a2afeb4cac460cba2a9d7b298bd3f69ac31862b92a9
70ed8d3241227858b0c40b2f6793cdd733020987beb7e6f01826fa2dae2b345f4e8e96da885a00901b20308f37c8613cf28ef997a6f25c741af917a547b38cff7577d2cac2654d5cdac2d0f1135ac6db3d70174b03c4149d134325f1b805ef11cd62531c13436ad1c7cb73f488dc411d349be34523d477953e8b47848e31ec85230a99ecd88c9cbc5d33de132aacd04877123cff599bea3b2e7b931347673cca605b3bc129496d5e80b06ae0eb3fce5c24ea0f8d2ecd4cfb9ed5034b26ed18b564731c78f5344ec863bd78797ad7de722c7a88e047af0364f69a303dc5f716ebda1de9ca21cb49e4091cb975c17f098932e884f36bded1fab34814931b0aeb72b1bc90747f7f5ebe73c547681f7a8d6d74e7acde2ba6e5e998bd6b035ade5fa64171dde4a82ed5ed7f273220d47bbd5a1c2ed4359d02392b746ba653d1c30f63bce161d0555ebc4775262036be51d4a50113bbac6823fd6a0d387a32673dc454c4d9d018cc25885a0d15d3f7488bbe18398d758cbbf1a24eaf71bd1560ff216e342e09efdbfae2872cfdf59ed802420ba8522edfd74f6d728ffa1683e586b53cbec80f00be6478a44d8df1c69a5cdbb50aa75da2f2dd0a679b037b4173f20b9514064d15ff50f1e9beb0112a41cdc0ecf7fb3028fe6f4c7339bb79d50cb7d43cabd8ae198741677d41e411c811c6267e9b4e41d944b035e47406d5120f1ee192db810cf6774*40c7a179573d57c54d0da0a1c4d71e306e1eea823f637f29c3e43b9792469d15", "openwall123"}, // Wallet created by Electrum 1.9.8 {"$electrum$1*bb7feb604201d0e74135337ca33249c4*090a4001b972c7483116471aa1598a84", "password@12345"}, // 1.x to 2.4.3 upgrade generates same hash // Wallet created by Electrum 2.4.3 {"$electrum$2*ca2a36958ea86cafd91be8f4806f073a*259129742f91f72e14d048fa0a1a0acf", "openwall"}, // Wallet created by Electrum 2.6.3 {"$electrum$2*3e37a6b705ea4e61884433c735edd0ff*dbfeaef2ea18df11016be57ed2a66b9d", "openwall"}, // Electrum 2.8.3 2FA wallet {"$electrum$2*af6348b949824312bad6fd6c16363c1c*a645e1f547174ce950884936777b3842", "openwall"}, // Electrum 1.x wallet upgraded to 2.8.3 {"$electrum$1*8f664b711d89cba39e1af76928832776*6c563922cf8630d46daeb10f90442499", "openwall123"}, // Electrum 2.6.4 wallet created by selecting "import keys" option during initialization, wallet_type == "imported" 
{"$electrum$3*390c9a6dea1160f17c263cabaf8e1d74*7edc571ab41253406c9ad18fc925a4ee", "openwall"}, // Similar wallet as above {"$electrum$3*e4a1a7f27bb2df7d0bbf91d769adb29b*9340ec01561bf8bc6240627bee4f84a5", "password@123456789"}, // Electrum 2.8.0+ encrypted wallet with truncated hash, "electrum28-wallet" from btcrecover project {"$electrum$5*0328e536dd1fbbb85d78de1a8c21215d4646cd87d6b6545afcfb203e5bb32e0de4*61b1e287a5acff4b40e4abd73ff62dc233c1c7a6a54b3270949281b9d44bc6e746743733360500718826e50bb28ea99a6378dc0b0c578e9d0bf09c667671c82a1bd71c8121edbb4c9cbca93ab0e17e218558ead81755e62b0d4ad547aa1b3beb0b9ee43b11270261c9b38502f00e7f6f096811b7fdae6f3dce85c278d3751fec044027054218ccf20d404bab24380b303f094704e626348a218f44ab88ce2ac5fa7d450069fca3bb53f9359dbbaad0ea1b3859129b19c93ed7888130f8a534f84a629c67edc150a1c5882a83cb0add4615bb569e8dc471de4d38fc8b1e0b9b28040b5ea86093fcdeceaedb6b8f073f6f0ee5541f473a4b1c2bfae4fc91e4bbb40fa2185ecfa4c72010bcf8df05b1a7db45f64307dbc439f8389f0e368e38960b6d61ac88c07ce95a4b03d6d8b13f4c7dc7d7c447097865235ab621aeef38dc4172bf2dc52e701132480127be375fe98834f16d9895dce7f6cdfe900a2ce57eaa6c3036c1b9a661c3c9adbf84f4adfe6d4d9fa9f829f2957cfb353917dc77fd8dd4872b7d90cb71b7d3a29c9bfe3440e02449220acba410fa0af030f51aa2438f7478dbb277d62613112e4eebc66d5d7bdba793fb2073d449954f563284819189ffb5dbcdeb6c95c64bc24e0ef986bce07bafe96ab449ae2b6edaf4f98ffbd392a57bd93c2359444ec4046ae65b440adb96b6e4eef9d06bb04d2f3fa2e4175165bcadbf7e13cc3b6e65e67df901f96a2f154bc763b56b3736a335e1d1bc16e99736f757a4ae56c099645c917360b1ecf8dcefc7281541c6ff65d87cadab4a48f1f6b7b73a3e5a67e2e032abb56b499e73a9f3b69ce065e43b0174639785ae30635d105ebcc827dcf9b19bdd1a92879a5d4bc4e12b5630c188b1b96e3c586e19901b8f96084bcd59b2f4b201a3a8b6e633a5c194901d4609add9671b0bcc12b2b94ae873d201258b36315484e4b9c5f5d6289656baa93eec9e92aec88e2d73d86b9e3d1f24294e3d8ebe9a9f2f6edfbf28f530670c5b086fc4f74df89b4e4cbe06ee7e45cbd238b599d19c2d5da5523b12b1e7050ea0a9b47a5d22c6c3fc476f814f9705dc7ed3aeb1b44fc6b4d69f02a74963dce5057c3c049f92e59
5a4da5035cffc303a4cb162803aa3f816527a7e466b8424789a0d77e26819615662420c370457e29fcc1938fd754f3acfd21416ce3ab27e9febbc0e24fc7055eddc31e48faa014f9f3695c2e956f0e6c94c507a8d2f8c3aeb4b98b69b6340b6a3acb1acdde9581279f78ee10687616360c018e9f67d6c8bb5950e8fdabd3d0d5808824975aa4a50f88581472212f24ad58a700fe4787642b973924575fe71d1ecd7b2b6acd363f48c40bdd55f35f60a06dee544c266e608fd5a6d263f745e8b11d1160638eb301adfd1a88eddf6d0ccb9e1021e0bde9cf5163583a202b3dc95c255c8cc24*ec90c1ff54632e7c8cfb812eeb14d7ec49ddaf576dca10bfb16f965e6106ce48", "btcr-test-password"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked, cracked_count; static struct custom_salt { uint32_t type; unsigned char iv[16]; unsigned char seed[64]; unsigned char ephemeral_pubkey[128]; unsigned char data[16384]; // is 16 KiB enough? uint32_t datalen; unsigned char mac[32]; secp256k1_pubkey pubkey; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); cracked_count = self->params.max_keys_per_crypt; } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int value, extra; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "*")) == NULL) // type goto err; if (!isdec(p)) goto err; value = atoi(p); if (value != 1 && value != 2 && value != 3 && value != 4 && value != 5) goto err; if (value == 1 || value == 2 || value == 3) { if ((p = strtokm(NULL, "*")) == NULL) // iv goto err; if (hexlenl(p, &extra) != 16 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // encrypted data (seed 
part) goto err; if (hexlenl(p, &extra) != 16 * 2 || extra) goto err; } else { if ((p = strtokm(NULL, "*")) == NULL) // ephemeral_pubkey goto err; if (hexlenl(p, &extra) > 128 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // data goto err; if (hexlenl(p, &extra) > 16384 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // data goto err; if (hexlenl(p, &extra) > 32 * 2 || extra) goto err; } MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; memset(&cs, 0, SALT_SIZE); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); if (cs.type == 1 || cs.type == 2 || cs.type == 3) { for (i = 0; i < 16; i++) cs.iv[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.seed[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; } else { secp256k1_context *ctx; int length = strlen(p) / 2; for (i = 0; i < length; i++) cs.ephemeral_pubkey[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); cs.datalen = strlen(p) / 2; for (i = 0; i < cs.datalen; i++) cs.data[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 32; i++) cs.mac[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE); secp256k1_ec_pubkey_parse(ctx, &cs.pubkey, cs.ephemeral_pubkey, length); secp256k1_context_destroy(ctx); } MEM_FREE(keeptr); return &cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void electrum_set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1); } static char *get_key(int index) { return saved_key[index]; } static const char 
*group_order = "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"; // The decypted and decompressed wallet should start with one of these two, // Christopher Gurnee #define EXPECTED_BYTES_1 "{\n \"" #define EXPECTED_BYTES_2 "{\r\n \"" static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; memset(cracked, 0, sizeof(cracked[0])*cracked_count); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { unsigned char iv[16]; unsigned char key[32]; SHA256_CTX ctx; AES_KEY aes_decrypt_key; int extra; unsigned char static_privkey[MAX_KEYS_PER_CRYPT][64]; int i, j; if (cur_salt->type == 1 || cur_salt->type == 2 || cur_salt->type == 3) { for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { unsigned char outbuf[48] = { 0 }; SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i])); SHA256_Final(key, &ctx); SHA256_Init(&ctx); SHA256_Update(&ctx, key, 32); SHA256_Final(key, &ctx); memcpy(iv, cur_salt->iv, 16); AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key); AES_cbc_encrypt(cur_salt->seed, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); if (cur_salt->type == 1) { // check if 16 bytes of the encrypted seed are all lower-case hex (btcrecover) outbuf[16] = 0; if (hexlenl((const char*)outbuf, &extra) != 8 * 2 || extra) cracked[index+i] = 0; else cracked[index+i] = 1; } else if (cur_salt->type == 2) { // check if starting 4 bytes are "xprv" if (strncmp((const char*)outbuf, "xprv", 4)) cracked[index+i] = 0; else { // check if remaining 12 bytes are in base58 set [1-9A-HJ-NP-Za-km-z] for (j = 0; j < 12; j++) { unsigned char c = outbuf[4 + j]; if ((c > 'z') || (c < '1') || ((c > '9') && (c < 'A')) || ((c > 'Z') && (c < 'a'))) { cracked[index+i] = 0; break; } } if (j == 12) cracked[index+i] = 1; } } else if (cur_salt->type == 3) { unsigned char padbyte = outbuf[15]; // check for valid PKCS7 padding for a 52 or 51 byte "WIF" private key, 64 is the 
original data size if (padbyte == 12 || padbyte == 13) { if (check_pkcs_pad(outbuf, 16, 16) < 0) cracked[index+i] = 0; else cracked[index+i] = 1; } else { cracked[index+i] = 0; } } } } else if (cur_salt->type == 4 || cur_salt->type == 5) { BIGNUM *p, *q, *r; BN_CTX *ctx; unsigned char shared_pubkey[33]; unsigned char keys[128]; unsigned char cmac[32]; secp256k1_context *sctx; SHA512_CTX md_ctx; int shared_pubkeylen= 33; #ifdef SIMD_COEF_64 int len[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len[i] = strlen(saved_key[i+index]); pin[i] = (unsigned char*)saved_key[i+index]; pout[i] = static_privkey[i]; } pbkdf2_sha512_sse((const unsigned char **)pin, len, (unsigned char*)"", 0, 1024, pout, 64, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { pbkdf2_sha512((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), (unsigned char*)"", 0, 1024, static_privkey[i], 64, 0); } #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { // do static_privkey % GROUP_ORDER p = BN_bin2bn(static_privkey[i], 64, NULL); q = BN_new(); r = BN_new(); BN_hex2bn(&q, group_order); ctx = BN_CTX_new(); BN_mod(r, p, q, ctx); BN_CTX_free(ctx); BN_free(p); BN_free(q); BN_bn2bin(r, static_privkey[i]); BN_free(r); sctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE); // multiply point with a scaler, shared_pubkey is compressed representation secp256k1_mul(sctx, shared_pubkey, &cur_salt->pubkey, static_privkey[i]); secp256k1_context_destroy(sctx); SHA512_Init(&md_ctx); SHA512_Update(&md_ctx, shared_pubkey, shared_pubkeylen); SHA512_Final(keys, &md_ctx); if (cur_salt->type == 4) { // calculate mac of data hmac_sha256(keys + 32, 32, cur_salt->data, cur_salt->datalen, cmac, 32); if (memcmp(&cur_salt->mac, cmac, 16) == 0) cracked[index+i] = 1; else cracked[index+i] = 0; } else if (cur_salt->type == 5) { z_stream z; unsigned char iv[16]; unsigned char out[512] = { 0 }; unsigned char fout[512] = { 0 }; AES_KEY 
aes_decrypt_key; // common zlib settings z.zalloc = Z_NULL; z.zfree = Z_NULL; z.opaque = Z_NULL; z.avail_in = 512; z.avail_out = 512; z.next_out = fout; memcpy(iv, keys, 16); // fast zlib based rejection test, is this totally safe? AES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key); AES_cbc_encrypt(cur_salt->data, out, 16, &aes_decrypt_key, iv, AES_DECRYPT); if ((memcmp(out, "\x78\x9c", 2) != 0) || (out[2] & 0x7) != 0x5) { cracked[index+i] = 0; } else { AES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key); AES_cbc_encrypt(cur_salt->data + 16, out + 16, 512 - 16, &aes_decrypt_key, iv, AES_DECRYPT); z.next_in = out; inflateInit2(&z, 15); inflate(&z, Z_NO_FLUSH); inflateEnd(&z); if ((memcmp(fout, EXPECTED_BYTES_1, 7) == 0) || (memcmp(fout, EXPECTED_BYTES_2, 8) == 0)) cracked[index+i] = 1; else cracked[index+i] = 0; } } } } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } /* report kdf type as tunable cost */ static unsigned int get_kdf_type(void *salt) { struct custom_salt *cs = salt; if (cs->type == 1 || cs->type == 2 || cs->type == 3) return 1; // SHA256 based KDF else return 2; // PBKDF2-SHA512 } struct fmt_main fmt_electrum = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { "kdf [1:SHA256 2:PBKDF2-SHA512]", }, { FORMAT_TAG }, electrum_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { get_kdf_type, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, electrum_set_key, get_key, fmt_default_clear_keys, crypt_all, { 
fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_LIBZ */
/* paint.c — concatenation marker; original file name of the following unit */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. 
%
%    o target: the RGB value of the target color.
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
  const MagickBooleanType invert)
{
/*
  Capacity (in segments) of the pending-work stack; exceeding it raises an
  exception rather than writing past the allocation.
*/
#define MaxStacksize 524288UL
/*
  Queue the horizontal span [left,right] of row (up) so the adjacent row in
  direction (delta) (+1 = below, -1 = above) is scanned later.  Spans whose
  target row falls outside the image are silently dropped.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  MemoryInfo
    *segment_info;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /* A translucent fill needs an alpha channel on the target image. */
  if ((image->matte == MagickFalse) &&
      (draw_info->fill.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.  The floodplane clone records which pixels the fill
    has reached: a reached pixel's opacity is set to TransparentOpacity.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  while (s > segment_stack)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: first scan left from x1 while the color
      test passes, marking reached pixels in the floodplane.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /* Stop at pixels already visited or failing the similarity test. */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /* skip is set when no pixel left of x1 matched. */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              /* Scan right from x while the color test passes. */
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
                image->columns-x,1,exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncCacheViewAuthenticPixels(floodplane_view,exception) ==
                  MagickFalse)
                break;
            }
          /* Queue the rows above/below the run just filled. */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /* Advance past non-matching pixels to the next matching run. */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane: every pixel marked reached gets the
      fill color on the channels the caller selected.
    */
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill.blue));
          if (((channel & OpacityChannel) != 0) ||
              (draw_info->fill.opacity != OpaqueOpacity))
            SetPixelOpacity(q,ClampToQuantum(fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   G r a d i e n t I m a g e                                                 %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const PixelPacket *start_color,
%        const PixelPacket *stop_color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o method: the gradient spread method: pad, reflect, or repeat.
%
%    o start_color: the start color.
%
%    o stop_color: the stop color.
%
%  This provides a good example of making use of the DrawGradientImage
%  function and the gradient structure in draw_info.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  register ssize_t
    i;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /* Default vector: top-left corner to bottom-right corner. */
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      /* Map a compass direction onto the gradient vector. */
      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /*
            Bug fix: a South gradient runs top-to-bottom, so the end point is
            bounded by the image height (rows), not its width (was columns-1,
            a copy-paste error; compare the NorthGravity case).
          */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=(MagickRealType) StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /* Without explicit hints, a linear gradient defaults to vertical. */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1)*cosine)+
        fabs((double) (image->rows-1)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1),(image->rows-1))/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) (MagickMax((image->columns-1),
            (image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1)*
            (image->columns-1)+(image->rows-1)*(image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1)/2.0;
          gradient->radii.y=(double) (image->rows-1)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) MagickMin((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /* Bug fix: release draw_info before throwing, it was leaked here. */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%   O i l P a i n t I m a g e                                                 %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.
Each pixel is replaced by the most frequent color occurring % in a circular region defined by radius. % % The format of the OilPaintImage method is: % % Image *OilPaintImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the circular neighborhood. % % o exception: return any errors or warnings in this structure. % */ static size_t **DestroyHistogramThreadSet(size_t **histogram) { register ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramThreadSet(const size_t count) { register ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads, sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) memset(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count, sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramThreadSet(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **magick_restrict histograms, width; ssize_t y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,0.5); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,0,0,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse) { InheritException(exception,&paint_image->exception); linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramThreadSet(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict paint_indexes; register ssize_t x; register PixelPacket *magick_restrict q; register size_t *histogram; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view); histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i, u; size_t count; ssize_t j, k, v; /* Assign most frequent color. 
*/ i=0; j=0; count=0; (void) memset(histogram,0,NumberPaintBins*sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+u+i))); histogram[k]++; if (histogram[k] > count) { j=i+u; count=histogram[k]; } } i+=(ssize_t) (linear_image->columns+width); } *q=(*(p+j)); if (linear_image->colorspace == CMYKColorspace) SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j)); p++; q++; } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramThreadSet(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
% % The format of the OpaquePaintImage method is: % % MagickBooleanType OpaquePaintImage(Image *image, % const PixelPacket *target,const PixelPacket *fill, % const MagickBooleanType invert) % MagickBooleanType OpaquePaintImageChannel(Image *image, % const ChannelType channel,const PixelPacket *target, % const PixelPacket *fill,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o target: the RGB value of the target color. % % o fill: the replacement color. % % o invert: paint any pixel that does not match the target color. % */ MagickExport MagickBooleanType OpaquePaintImage(Image *image, const MagickPixelPacket *target,const MagickPixelPacket *fill, const MagickBooleanType invert) { return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert)); } MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *target, const MagickPixelPacket *fill,const MagickBooleanType invert) { #define OpaquePaintImageTag "Opaque/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket conform_fill, conform_target, zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(target != (MagickPixelPacket *) NULL); assert(fill != (MagickPixelPacket *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); ConformMagickPixelPacket(image,fill,&conform_fill,exception); ConformMagickPixelPacket(image,target,&conform_target,exception); /* Make image color opaque. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,&conform_target) != invert) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(conform_fill.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(conform_fill.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(conform_fill.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(conform_fill.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(conform_fill.index)); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,OpaquePaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImage() changes 
the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. % % The format of the TransparentPaintImage method is: % % MagickBooleanType TransparentPaintImage(Image *image, % const MagickPixelPacket *target,const Quantum opacity, % const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o target: the target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % */ MagickExport MagickBooleanType TransparentPaintImage(Image *image, const MagickPixelPacket *target,const Quantum opacity, const MagickBooleanType invert) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(target != (MagickPixelPacket *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Make image color transparent. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) != invert) q->opacity=opacity; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e C h r o m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImageChroma() changes the opacity value associated with any % pixel that matches color to the value defined by opacity. 
%
%  As there is only one fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for the similarity of two color components (RGB) can
%  be different.  Thus we define this method to take two target pixels (one
%  low and one high), and all the pixels of an image which lie between these
%  two pixels are made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Bug fix: progress is shared by all OpenMP threads; guard the
           increment with an atomic to avoid a data race. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
GB_unaryop__identity_int8_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int8_int16 // op(A') function: GB_tran__identity_int8_int16 // C type: int8_t // A type: int16_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int8_int16 ( int8_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int8_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
jacobi2d.c
#include <stdio.h>
#include <stdlib.h>

#define ROW 5
#define COL 5
#define LIMIT 2

/*
 * Jacobi-style 2-D stencil demo on a ROW x COL grid.
 *
 * Fixed boundary of 150.0, random interior in [0,9].  Each of the LIMIT
 * sweeps replaces every interior cell of B with the sum of the matching
 * cell of A and its four neighbours (a sum, not an average — the original
 * comments say "solo sumo"), then swaps the buffers.
 *
 * Bug fixes vs. the original:
 *  - B's border cells were never initialised, so after the first swap the
 *    next sweep (and the final print) read indeterminate values (UB).
 *    B's border is now set to 150.0 alongside A's.
 *  - The final print chose A or B based on LIMIT's parity, but after each
 *    swap A always points at the newest grid; we now always print A.
 *  - malloc results are checked and both buffers are freed.
 */
int main(void){
    float *A = malloc(ROW * COL * sizeof *A);   /* current grid */
    float *B = malloc(ROW * COL * sizeof *B);   /* scratch grid */
    if (A == NULL || B == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(A);
        free(B);
        return EXIT_FAILURE;
    }

    /* Initialise A; give B the same fixed boundary so the halo stays
       valid after every buffer swap (bug fix). */
    for (int i = 0; i < ROW; ++i)
        for (int j = 0; j < COL; ++j) {
            int idx = i * COL + j;   /* row-major index (was i*ROW+j; same
                                        value only because ROW == COL) */
            if (i == 0 || i == ROW - 1 || j == 0 || j == COL - 1) {
                A[idx] = 150.0f;
                B[idx] = 150.0f;
            } else {
                A[idx] = (float)(rand() % 10);
            }
        }

    printf("Valor del array A - \n");
    for (int i = 0; i < ROW * COL; ++i) {
        printf("%f ,", A[i]);
        if (i != 0 && (i + 1) % COL == 0)
            printf("\n");
    }

    /* LIMIT sweeps of the 5-point sum stencil over the interior. */
    for (int k = 0; k < LIMIT; ++k) {
#pragma omp parallel for num_threads(2)
        for (int i = 1; i < ROW - 1; ++i)
            for (int j = 1; j < COL - 1; ++j) {
                int idx = i * COL + j;
                B[idx] = A[idx] + A[idx + 1] + A[idx - 1]
                       + A[idx + COL] + A[idx - COL];
            }
        /* Swap buffers: A always points at the newest grid. */
        float *tmp = A;
        A = B;
        B = tmp;
    }

    /* A holds the latest values regardless of LIMIT's parity (bug fix:
       the original printed stale data from B when LIMIT was odd). */
    printf("\nValor del array: \n");
    for (int i = 0; i < ROW * COL; ++i) {
        printf("%f ,", A[i]);
        if (i != 0 && (i + 1) % COL == 0)
            printf("\n");
    }

    free(A);
    free(B);
    return 0;
}
test_omp.c
#include <nautilus/nautilus.h>
#include <rt/omp/omp.h>

/* Size of the demo vectors. */
#define N 4

/* Shared work vectors.  NOTE(review): volatile is not a synchronization
   primitive; the implicit OpenMP barriers are what order these accesses. */
volatile float a[N];
volatile float b[N];
volatile float c[N];

/* OpenMP smoke test: fill a and b, have every thread announce itself,
   compute c = a .* b with a worksharing loop, and print the results
   via the Nautilus virtual console.  Always returns 0. */
static int omp_simple()
{
    int i;

    /* Serial initialisation of the inputs. */
    for (i=0;i<N;i++) {
        a[i] = i;
        b[i] = i;
    }

    /* Each thread of the team prints its id and the team size. */
#pragma omp parallel
    nk_vc_printf("I am thread %d (%d total)\n",omp_get_thread_num(),omp_get_num_threads());

    /* Element-wise product, iterations split across the team. */
#pragma omp parallel for
    for (i=0;i<N;i++) {
        c[i] = a[i] * b[i];
    }

    /* Serial result dump (values are small integers, hence the casts). */
    for (i=0;i<N;i++) {
        nk_vc_printf("a[%d]=%d b[%d]=%d c[%d]=%d\n",i,(int)a[i],i,(int)b[i],i,(int)c[i]);
    }

    return 0;
}

/* Print the team size at the given nesting level; `omp single` ensures
   exactly one thread of the current team reports. */
static void report_num_threads(int level)
{
#pragma omp single
    {
        nk_vc_printf("Level %d: number of threads in the team - %d\n", level, omp_get_num_threads());
    }
}

/* Exercise three levels of nested parallel regions, two threads each.
   Dynamic adjustment is disabled so the runtime honours num_threads(2).
   NOTE(review): whether the inner regions actually fork depends on the
   runtime's nested-parallelism setting.  Always returns 0. */
static int omp_nested()
{
    omp_set_dynamic(0);
#pragma omp parallel num_threads(2)
    {
        report_num_threads(1);
#pragma omp parallel num_threads(2)
        {
            report_num_threads(2);
#pragma omp parallel num_threads(2)
            {
                report_num_threads(3);
            }
        }
    }
    return(0);
}

/* Entry point: bring up the Nautilus OpenMP runtime, run both demos,
   and tear the runtime down again.  Always returns 0. */
int test_omp()
{
    nk_omp_thread_init();

    nk_vc_printf("Starting simple test\n");
    omp_simple();
    //    goto out;
    nk_vc_printf("Starting nested test\n");
    omp_nested();

    /* NOTE(review): this label is only reachable by fall-through since the
       goto above is commented out; some compilers warn it is unused. */
 out:
    nk_vc_printf("OMP test finished\n");

    nk_omp_thread_deinit();

    return 0;
}
data_read.c
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> #include <omp.h> #include <string.h> #include "shared_consts.h" #include <varargs.h> double **LoadEquationMatrix(char *path, int *rows, int *columns) { double **matrix; int n, i, j; FILE *file; if ((file = fopen(path, "r")) == NULL) { printf("%s:%d Nie udalo sie otworzyc podanego pliku: %s\n", __FILE__, __LINE__, path); return NULL; } if (fscanf(file, "%d", &n) != 1) return NULL; *rows = n; *columns = n + 1; matrix = ALLOC_MEMORY(sizeof(double*)*n); for (i = 0; i < n; i++) { matrix[i] = (double*)ALLOC_MEMORY(sizeof(double)*(n + 1)); for (j = 0; j <= n; j++) if (fscanf(file, "%lf", matrix[i] + j) != 1) return NULL; } fclose(file); return matrix; } double** DrawEquationMatrixParallel(int rows, int columns) { double **matrix = NULL; int i, j; matrix = ALLOC_MEMORY(sizeof(double*)*rows); if (matrix == NULL) return NULL; for (i = 0; i < rows; i++) { matrix[i] = ALLOC_MEMORY(sizeof(double)*columns); if (matrix[i] == NULL) return NULL; } srand(time(NULL)); #ifndef FALSE_SHARING __assume_aligned(matrix, ALIGNMENT_SIZE); #endif #pragma omp parallel for default(none) schedule(static) private(i, j) shared(matrix, rows, columns) collapse(2) for (i = 0; i < rows; i++) { for (j = 0; j < columns; j++) matrix[i][j] = (double)rand() / RAND_MAX; } return matrix; } double** DrawEquationMatrix(int rows, int columns) { double **matrix = NULL; int i, j; matrix = ALLOC_MEMORY(sizeof(double*)*rows); if (matrix == NULL) return NULL; srand(time(NULL)); #ifndef FALSE_SHARING __assume_aligned(matrix, ALIGNMENT_SIZE); #endif for (i = 0; i < rows; i++) { matrix[i] = ALLOC_MEMORY(sizeof(double)*columns); if (matrix[i] == NULL) return NULL; for (j = 0; j < columns; j++) matrix[i][j] = (double)rand() / RAND_MAX; } return matrix; }
hello.c
#include<stdio.h>
#include<omp.h>

/* Spawn an OpenMP team and have every member print a greeting tagged
   with its (hexadecimal) thread number. */
int main(void)
{
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        printf("Hello, World from thread #%0X.\n", tid);
    }
    return 0;
}
simd5.c
#include <math.h>

/*
 * Write into b[1..n-1] the average of each adjacent pair of a:
 * b[i] = (a[i] + a[i-1]) / 2.  b[0] is left untouched; m is unused.
 *
 * Bug fix: `#pragma omp simd` must be applied directly to the for loop;
 * the original wrapped the loop in a compound statement `{ ... }`, which
 * is non-conforming and rejected by OpenMP compilers.
 *
 * NOTE(review): the historical `void main(...)` signature of this example
 * is kept for its callers, although hosted C requires `int main`.
 */
void main(int n, int m, float *a, float *b)
{
    int i;

#pragma omp simd order(concurrent)
    for (i = 1; i < n; i++)
        b[i] = ((a[i] + a[i - 1]) / 2.0);
}
task_single_producer_omp.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See LICENSE.txt in top-level directory.
 */

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Default number of tasks when none is given on the command line. */
#define NUM_TASKS 5000000
/* Default number of timed repetitions. */
#define NUM_REPS 1

/* In-place scalar multiply: *a *= value. */
void sscal(float value, float *a)
{
    *a = *a * value;
}

/*
 * Single-producer OpenMP task micro-benchmark.
 * Usage: prog [ntasks] [reps].  One thread (inside `single`) creates
 * ntasks tasks; the rest of the team executes them.  Each task scales
 * one element of a by 0.9, and the result is verified serially at the end.
 */
int main(int argc, char *argv[])
{
    int i, r, nthreads;
    double *time, avg_time = 0.0;
    char *str, *endptr;
    float *a;
    double time2 = 0.0;

    /* Discover the team size before timing starts. */
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    if (argc > 1) {
        str = argv[1];
    }
    /* NOTE(review): strtoll's long long result is narrowed to int here;
       very large arguments would wrap. */
    int ntasks = argc > 1 ? strtoll(str, &endptr, 10) : NUM_TASKS;
    /* Ensure at least one task per thread. */
    if (ntasks < nthreads)
        ntasks = nthreads;
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;
    time = malloc(sizeof(double) * rep);
    a = malloc(sizeof(float) * ntasks);

    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
            /* Exactly one thread produces all tasks; the sleep lets the
               other threads reach the implicit barrier and start stealing. */
#pragma omp single
            {
                sleep(2);
                printf("Thread %d\n", omp_get_thread_num());
                time2 = omp_get_wtime();
                for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                    {
                        /* "Stolen" here means: executed by a thread other
                           than i % nthreads (a heuristic label, not an
                           actual runtime property). */
                        printf("Task %d executed by Thread %d Stolen? %s\n", i,
                               omp_get_thread_num(),
                               (i % nthreads == omp_get_thread_num()) ? "NO" : "YES");
                        sscal(0.9f, &a[i]);
                    }
                }
                /* NOTE(review): time2 only keeps the creation time of the
                   last repetition, and is written while tasks may still be
                   running on other threads. */
                time2 = omp_get_wtime() - time2;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }

    /* Serial verification: every element must have been scaled once.
       NOTE(review): exact float equality works here because the operands
       are small and the computation matches sscal bit-for-bit. */
    for (i = 0; i < ntasks; i++) {
        if (a[i] != (i + 100.0f) * 0.9f) {
            printf("error: a[%d]=%2.f expected %2.f\n", i, a[i], (i + 100.0f) * 0.9f);
        }
    }
    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n", nthreads, ntasks, avg_time, time2);
    /* NOTE(review): time and a are intentionally not freed; the process
       exits immediately after. */
    return EXIT_SUCCESS;
}
primordial.c
/** @file primordial.c Documented primordial module. * * Julien Lesgourgues, 24.08.2010 * * This module computes the primordial spectra. It can be used in different modes: * simple parametric form, evolving inflaton perturbations, etc. So far only * the mode corresponding to a simple analytic form in terms of amplitudes, tilts * and runnings has been developed. * * The following functions can be called from other modules: * * -# primordial_init() at the beginning (anytime after perturb_init() and before spectra_init()) * -# primordial_spectrum_at_k() at any time for computing P(k) at any k * -# primordial_free() at the end */ #include "primordial.h" /** * Primordial spectra for arbitrary argument and for all initial conditions. * * This routine evaluates the primordial spectrum at a given value of k by * interpolating in the pre-computed table. * * When k is not in the pre-computed range but the spectrum can be found * analytically, it finds it. Otherwise returns an error. * * Can be called in two modes; linear or logarithmic: * * - linear: takes k, returns P(k) * * - logarithmic: takes ln(k), return ln(P(k)) * * One little subtlety: in case of several correlated initial conditions, * the cross-correlation spectrum can be negative. Then, in logarithmic mode, * the non-diagonal elements contain the cross-correlation angle \f$ P_{12}/\sqrt{P_{11} P_{22}}\f$ * (from -1 to 1) instead of \f$\ln{P_{12}}\f$ * * This function can be * called from whatever module at whatever time, provided that * primordial_init() has been called before, and primordial_free() has not * been called yet. * * @param ppm Input: pointer to primordial structure containing tabulated primordial spectrum * @param index_md Input: index of mode (scalar, tensor, ...) 
* @param mode Input: linear or logarithmic * @param input Input: wavenumber in 1/Mpc (linear mode) or its logarithm (logarithmic mode) * @param output Output: for each pair of initial conditions, primordial spectra P(k) in \f$Mpc^3\f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @return the error status */ int primordial_spectrum_at_k( struct primordial * ppm, int index_md, enum linear_or_logarithmic mode, double input, double * output /* array with argument output[index_ic1_ic2] (must be already allocated) */ ) { /** Summary: */ /** - define local variables */ int index_ic1,index_ic2,index_ic1_ic2; double lnk; int last_index; /** - infer ln(k) from input. In linear mode, reject negative value of input k value. */ if (mode == linear) { class_test(input<=0., ppm->error_message, "k = %e",input); lnk=log(input); } else { lnk = input; } /** - if ln(k) is not in the interpolation range, return an error, unless we are in the case of a analytic spectrum, for which a direct computation is possible */ if ((lnk > ppm->lnk[ppm->lnk_size-1]) || (lnk < ppm->lnk[0])) { class_test(ppm->primordial_spec_type != analytic_Pk, ppm->error_message, "k=%e out of range [%e : %e]",exp(lnk),exp(ppm->lnk[0]),exp(ppm->lnk[ppm->lnk_size-1])); /* direct computation */ for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]); if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { class_call(primordial_analytic_spectrum(ppm, index_md, index_ic1_ic2, exp(lnk), &(output[index_ic1_ic2])), ppm->error_message, ppm->error_message); } else { output[index_ic1_ic2] = 0.; } } } /* if mode==linear, output is already in the correct format. Otherwise, apply necessary transformation. 
*/ if (mode == logarithmic) { for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]); output[index_ic1_ic2] = log(output[index_ic1_ic2]); } for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]); if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { output[index_ic1_ic2] /= sqrt(output[index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md])]* output[index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md])]); } } } } } /** - otherwise, interpolate in the pre-computed table */ else { class_call(array_interpolate_spline( ppm->lnk, ppm->lnk_size, ppm->lnpk[index_md], ppm->ddlnpk[index_md], ppm->ic_ic_size[index_md], lnk, &last_index, output, ppm->ic_ic_size[index_md], ppm->error_message), ppm->error_message, ppm->error_message); /* if mode==logarithmic, output is already in the correct format. Otherwise, apply necessary transformation. 
*/ if (mode == linear) { for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]); output[index_ic1_ic2]=exp(output[index_ic1_ic2]); } for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]); if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { output[index_ic1_ic2] *= sqrt(output[index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md])]* output[index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md])]); } else { output[index_ic1_ic2] = 0.; } } } } } return _SUCCESS_; } /** * This routine initializes the primordial structure (in particular, it computes table of primordial spectrum values) * * @param ppr Input: pointer to precision structure (defines method and precision for all computations) * @param ppt Input: pointer to perturbation structure (useful for knowing k_min, k_max, etc.) * @param ppm Output: pointer to initialized primordial structure * @return the error status */ int primordial_init( struct precision * ppr, struct perturbs * ppt, struct primordial * ppm ) { /** Summary: */ /** - define local variables */ double k,k_min,k_max; int index_md,index_ic1,index_ic2,index_ic1_ic2,index_k; double pk,pk1,pk2; double dlnk,lnpk_pivot,lnpk_minus,lnpk_plus,lnpk_minusminus,lnpk_plusplus; /* uncomment if you use optional test below (for correlated isocurvature modes) */ //double cos_delta_k; /** - check that we really need to compute the primordial spectra */ if (ppt->has_perturbations == _FALSE_) { ppm->lnk_size=0; if (ppm->primordial_verbose > 0) printf("No perturbations requested. Primordial module skipped.\n"); return _SUCCESS_; } else { if (ppm->primordial_verbose > 0) printf("Computing primordial spectra"); } /** - get kmin and kmax from perturbation structure. 
Test that they make sense. */ k_min = ppt->k_min; /* first value, inferred from perturbations structure */ k_max = ppt->k_max; /* last value, inferred from perturbations structure */ class_test(k_min <= 0., ppm->error_message, "k_min negative or null: stop to avoid segmentation fault"); class_test(k_max <= 0., ppm->error_message, "k_max negative or null: stop to avoid segmentation fault"); class_test(ppm->k_pivot <= 0., ppm->error_message, "k_pivot negative or null: stop to avoid segmentation fault"); class_test(ppr->k_per_decade_primordial <= 0., ppm->error_message, "k_per_decade_primordial negative or null: stop to avoid segmentation fault"); class_test(ppr->k_per_decade_primordial <= _K_PER_DECADE_PRIMORDIAL_MIN_, ppm->error_message, "k_per_decade_primordial = %e: you ask for such a sparse sampling of the primordial spectrum that this is probably a mistake", ppr->k_per_decade_primordial); /** - allocate and fill values of \f$ \ln{k}\f$'s */ class_call(primordial_get_lnk_list(ppm, k_min, k_max, ppr->k_per_decade_primordial ), ppm->error_message, ppm->error_message); /** - define indices and allocate tables in primordial structure */ class_call(primordial_indices(ppt, ppm), ppm->error_message, ppm->error_message); /** - deal with case of analytic primordial spectra (with amplitudes, tilts, runnings, etc.) 
*/ if (ppm->primordial_spec_type == analytic_Pk) { if (ppm->primordial_verbose > 0) printf(" (analytic spectrum)\n"); class_call_except(primordial_analytic_spectrum_init(ppt, ppm), ppm->error_message, ppm->error_message, primordial_free(ppm)); for (index_k = 0; index_k < ppm->lnk_size; index_k++) { k=exp(ppm->lnk[index_k]); for (index_md = 0; index_md < ppt->md_size; index_md++) { for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]); if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { class_call(primordial_analytic_spectrum(ppm, index_md, index_ic1_ic2, k, &pk), ppm->error_message, ppm->error_message); if (index_ic1 == index_ic2) { /* diagonal coefficients: ln[P(k)] */ ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = log(pk); } else { /* non-diagonal coefficients: cosDelta(k) = P(k)_12/sqrt[P(k)_1 P(k)_2] */ class_call(primordial_analytic_spectrum(ppm, index_md, index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]), k, &pk1), ppm->error_message, ppm->error_message); class_call(primordial_analytic_spectrum(ppm, index_md, index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md]), k, &pk2), ppm->error_message, ppm->error_message); /* either return an error if correlation is too large... */ /* cos_delta_k = pk/sqrt(pk1*pk2); class_test_except((cos_delta_k < -1.) || (cos_delta_k > 1.), ppm->error_message, primordial_free(ppm), "correlation angle between IC's takes unphysical values"); ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = cos_delta_k; */ /* ... 
or enforce definite positive correlation matrix */ if (pk > sqrt(pk1*pk2)) ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = 1.; else if (pk < -sqrt(pk1*pk2)) ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = -1.; else ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = pk/sqrt(pk1*pk2); } } else { /* non-diagonal coefficients when ic's are uncorrelated */ ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = 0.; } } } } } } /** - deal with case of inflation with given \f$V(\phi)\f$ or \f$H(\phi)\f$ */ else if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_H) || (ppm->primordial_spec_type == inflation_V_end)) { class_call(primordial_inflation_indices(ppm), ppm->error_message, ppm->error_message); if (ppm->primordial_verbose > 0) printf(" (simulating inflation)\n"); class_call_except(primordial_inflation_solve_inflation(ppt,ppm,ppr), ppm->error_message, ppm->error_message, primordial_free(ppm)); } /** - deal with the case of external calculation of \f$ P_k \f$*/ else if (ppm->primordial_spec_type == external_Pk) { class_test(ppt->has_scalars == _FALSE_, ppm->error_message, "external Pk module cannot work if you do not ask for scalar modes"); class_test(ppt->has_vectors == _TRUE_, ppm->error_message, "external Pk module cannot work if you ask for vector modes"); class_test(ppt->has_bi == _TRUE_ || ppt->has_cdi == _TRUE_ || ppt->has_nid == _TRUE_ || ppt->has_niv == _TRUE_, ppm->error_message, "external Pk module cannot work if you ask for isocurvature modes (but that could be implemented easily in the future!)"); if (ppm->primordial_verbose > 0) printf(" (Pk calculated externally)\n"); class_call_except(primordial_external_spectrum_init(ppt,ppm), ppm->error_message, ppm->error_message, primordial_free(ppm)); } else { class_test(0==0, ppm->error_message, "primordial spectrum type not recognized"); } /** - compute second derivative of each \f$ \ln{P_k} 
\f$ versus lnk with spline, in view of interpolation */ for (index_md = 0; index_md < ppm->md_size; index_md++) { class_call(array_spline_table_lines(ppm->lnk, ppm->lnk_size, ppm->lnpk[index_md], ppm->ic_ic_size[index_md], ppm->ddlnpk[index_md], _SPLINE_EST_DERIV_, ppm->error_message), ppm->error_message, ppm->error_message); } /** - derive spectral parameters from numerically computed spectra (not used by the rest of the code, but useful to keep in memory for several types of investigation) */ if (ppm->primordial_spec_type != analytic_Pk) { dlnk = log(10.)/ppr->k_per_decade_primordial; if (ppt->has_scalars == _TRUE_) { class_call(primordial_spectrum_at_k(ppm, ppt->index_md_scalars, logarithmic, log(ppm->k_pivot), &lnpk_pivot), ppm->error_message, ppm->error_message); class_call(primordial_spectrum_at_k(ppm, ppt->index_md_scalars, logarithmic, log(ppm->k_pivot)+dlnk, &lnpk_plus), ppm->error_message, ppm->error_message); class_call(primordial_spectrum_at_k(ppm, ppt->index_md_scalars, logarithmic, log(ppm->k_pivot)-dlnk, &lnpk_minus), ppm->error_message, ppm->error_message); ppm->A_s = exp(lnpk_pivot); ppm->n_s = (lnpk_plus-lnpk_minus)/(2.*dlnk)+1.; ppm->alpha_s = (lnpk_plus-2.*lnpk_pivot+lnpk_minus)/pow(dlnk,2); /** - expression for alpha_s comes from: `ns_2 = (lnpk_plus-lnpk_pivot)/(dlnk)+1` `ns_1 = (lnpk_pivot-lnpk_minus)/(dlnk)+1` `alpha_s = dns/dlnk = (ns_2-ns_1)/dlnk = (lnpk_plus-lnpk_pivot-lnpk_pivot+lnpk_minus)/(dlnk)/(dlnk)` **/ class_call(primordial_spectrum_at_k(ppm, ppt->index_md_scalars, logarithmic, log(ppm->k_pivot)+2.*dlnk, &lnpk_plusplus), ppm->error_message, ppm->error_message); class_call(primordial_spectrum_at_k(ppm, ppt->index_md_scalars, logarithmic, log(ppm->k_pivot)-2.*dlnk, &lnpk_minusminus), ppm->error_message, ppm->error_message); /** - expression for beta_s: `ppm->beta_s = (alpha_plus-alpha_minus)/dlnk = (lnpk_plusplus-2.*lnpk_plus+lnpk_pivot - (lnpk_pivot-2.*lnpk_minus+lnpk_minusminus)/pow(dlnk,3)` **/ /* Simplification of the beta_s 
expression: */ ppm->beta_s = (lnpk_plusplus-2.*lnpk_plus+2.*lnpk_minus-lnpk_minusminus)/pow(dlnk,3); if (ppm->primordial_verbose > 0) printf(" -> A_s=%g n_s=%g alpha_s=%g\n",ppm->A_s,ppm->n_s,ppm->alpha_s); } if (ppt->has_tensors == _TRUE_) { class_call(primordial_spectrum_at_k(ppm, ppt->index_md_tensors, logarithmic, log(ppm->k_pivot), &lnpk_pivot), ppm->error_message, ppm->error_message); class_call(primordial_spectrum_at_k(ppm, ppt->index_md_tensors, logarithmic, log(ppm->k_pivot)+dlnk, &lnpk_plus), ppm->error_message, ppm->error_message); class_call(primordial_spectrum_at_k(ppm, ppt->index_md_tensors, logarithmic, log(ppm->k_pivot)-dlnk, &lnpk_minus), ppm->error_message, ppm->error_message); ppm->r = exp(lnpk_pivot)/ppm->A_s; ppm->n_t = (lnpk_plus-lnpk_minus)/(2.*dlnk); ppm->alpha_t = (lnpk_plus-2.*lnpk_pivot+lnpk_minus)/pow(dlnk,2); if (ppm->primordial_verbose > 0) printf(" -> r=%g n_t=%g alpha_t=%g\n",ppm->r,ppm->n_t,ppm->alpha_t); } } return _SUCCESS_; } /** * This routine frees all the memory space allocated by primordial_init(). * * To be called at the end of each run. 
* * @param ppm Input: pointer to primordial structure (which fields must be freed) * @return the error status */ int primordial_free( struct primordial * ppm ) { int index_md; if (ppm->lnk_size > 0) { if (ppm->primordial_spec_type == analytic_Pk) { for (index_md = 0; index_md < ppm->md_size; index_md++) { free(ppm->amplitude[index_md]); free(ppm->tilt[index_md]); free(ppm->running[index_md]); } free(ppm->amplitude); free(ppm->tilt); free(ppm->running); } else if (ppm->primordial_spec_type == external_Pk) { free(ppm->command); } for (index_md = 0; index_md < ppm->md_size; index_md++) { free(ppm->lnpk[index_md]); free(ppm->ddlnpk[index_md]); free(ppm->is_non_zero[index_md]); } free(ppm->lnpk); free(ppm->ddlnpk); free(ppm->is_non_zero); free(ppm->ic_size); free(ppm->ic_ic_size); free(ppm->lnk); } return _SUCCESS_; } /** * This routine defines indices and allocates tables in the primordial structure * * @param ppt Input: pointer to perturbation structure * @param ppm Input/output: pointer to primordial structure * @return the error status */ int primordial_indices( struct perturbs * ppt, struct primordial * ppm ) { int index_md; ppm->md_size = ppt->md_size; class_alloc(ppm->lnpk,ppt->md_size*sizeof(double*),ppm->error_message); class_alloc(ppm->ddlnpk,ppt->md_size*sizeof(double*),ppm->error_message); class_alloc(ppm->ic_size,ppt->md_size*sizeof(int*),ppm->error_message); class_alloc(ppm->ic_ic_size,ppt->md_size*sizeof(int*),ppm->error_message); class_alloc(ppm->is_non_zero,ppm->md_size*sizeof(short *),ppm->error_message); for (index_md = 0; index_md < ppt->md_size; index_md++) { ppm->ic_size[index_md] = ppt->ic_size[index_md]; ppm->ic_ic_size[index_md] = (ppm->ic_size[index_md]*(ppm->ic_size[index_md]+1))/2; class_alloc(ppm->lnpk[index_md], ppm->lnk_size*ppm->ic_ic_size[index_md]*sizeof(double), ppm->error_message); class_alloc(ppm->ddlnpk[index_md], ppm->lnk_size*ppm->ic_ic_size[index_md]*sizeof(double), ppm->error_message); class_alloc(ppm->is_non_zero[index_md], 
ppm->ic_ic_size[index_md]*sizeof(short), ppm->error_message); } return _SUCCESS_; } /** * This routine allocates and fills the list of wavenumbers k * * * @param ppm Input/output: pointer to primordial structure * @param kmin Input: first value * @param kmax Input: last value that we should encompass * @param k_per_decade Input: number of k per decade * @return the error status */ int primordial_get_lnk_list( struct primordial * ppm, double kmin, double kmax, double k_per_decade ) { int i; class_test((kmin <= 0.) || (kmax <= kmin), ppm->error_message, "inconsistent values of kmin=%e, kmax=%e",kmin,kmax); ppm->lnk_size = (int)(log(kmax/kmin)/log(10.)*k_per_decade) + 2; class_alloc(ppm->lnk,ppm->lnk_size*sizeof(double),ppm->error_message); for (i=0; i<ppm->lnk_size; i++) ppm->lnk[i]=log(kmin)+i*log(10.)/k_per_decade; return _SUCCESS_; } /** * This routine interprets and stores in a condensed form the input parameters * in the case of a simple analytic spectra with amplitudes, tilts, runnings, * in such way that later on, the spectrum can be obtained by a quick call to * the routine primordial_analytic_spectrum(() * * @param ppt Input: pointer to perturbation structure * @param ppm Input/output: pointer to primordial structure * @return the error status */ int primordial_analytic_spectrum_init( struct perturbs * ppt, struct primordial * ppm ) { int index_md,index_ic1,index_ic2; int index_ic1_ic2,index_ic1_ic1,index_ic2_ic2; double one_amplitude=0.; double one_tilt=0.; double one_running=0.; double one_correlation=0.; class_alloc(ppm->amplitude, ppm->md_size*sizeof(double *), ppm->error_message); class_alloc(ppm->tilt, ppm->md_size*sizeof(double *), ppm->error_message); class_alloc(ppm->running, ppm->md_size*sizeof(double *), ppm->error_message); for (index_md = 0; index_md < ppm->md_size; index_md++) { class_alloc(ppm->amplitude[index_md], ppm->ic_ic_size[index_md]*sizeof(double), ppm->error_message); class_alloc(ppm->tilt[index_md], 
ppm->ic_ic_size[index_md]*sizeof(double), ppm->error_message); class_alloc(ppm->running[index_md], ppm->ic_ic_size[index_md]*sizeof(double), ppm->error_message); } for (index_md = 0; index_md < ppm->md_size; index_md++) { /* diagonal coefficients */ for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) { if (_scalars_) { if ((ppt->has_ad == _TRUE_) && (index_ic1 == ppt->index_ic_ad)) { one_amplitude = ppm->A_s; one_tilt = ppm->n_s; one_running = ppm->alpha_s; } if ((ppt->has_bi == _TRUE_) && (index_ic1 == ppt->index_ic_bi)) { one_amplitude = ppm->A_s*ppm->f_bi*ppm->f_bi; one_tilt = ppm->n_bi; one_running = ppm->alpha_bi; } if ((ppt->has_cdi == _TRUE_) && (index_ic1 == ppt->index_ic_cdi)) { one_amplitude = ppm->A_s*ppm->f_cdi*ppm->f_cdi; one_tilt = ppm->n_cdi; one_running = ppm->alpha_cdi; } if ((ppt->has_nid == _TRUE_) && (index_ic1 == ppt->index_ic_nid)) { one_amplitude = ppm->A_s*ppm->f_nid*ppm->f_nid; one_tilt = ppm->n_nid; one_running = ppm->alpha_nid; } if ((ppt->has_niv == _TRUE_) && (index_ic1 == ppt->index_ic_niv)) { one_amplitude = ppm->A_s*ppm->f_niv*ppm->f_niv; one_tilt = ppm->n_niv; one_running = ppm->alpha_niv; } } if (_tensors_) { if (index_ic1 == ppt->index_ic_ten) { one_amplitude = ppm->A_s*ppm->r; one_tilt = ppm->n_t+1.; /* +1 to match usual definition of n_t (equivalent to n_s-1) */ one_running = ppm->alpha_t; } } class_test(one_amplitude <= 0., ppm->error_message, "inconsistent input for primordial amplitude: %g for index_md=%d, index_ic=%d\n", one_amplitude,index_md,index_ic1); index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]); ppm->is_non_zero[index_md][index_ic1_ic2] = _TRUE_; ppm->amplitude[index_md][index_ic1_ic2] = one_amplitude; ppm->tilt[index_md][index_ic1_ic2] = one_tilt; ppm->running[index_md][index_ic1_ic2] = one_running; } /* non-diagonal coefficients */ for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < 
ppm->ic_size[index_md]; index_ic2++) { if (_scalars_) { if ((ppt->has_ad == _TRUE_) && (ppt->has_bi == _TRUE_) && (((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_bi)) || ((index_ic1 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_bi)))) { one_correlation = ppm->c_ad_bi; one_tilt = ppm->n_ad_bi; one_running = ppm->alpha_ad_bi; } if ((ppt->has_ad == _TRUE_) && (ppt->has_cdi == _TRUE_) && (((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_cdi)) || ((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_cdi)))) { one_correlation = ppm->c_ad_cdi; one_tilt = ppm->n_ad_cdi; one_running = ppm->alpha_ad_cdi; } if ((ppt->has_ad == _TRUE_) && (ppt->has_nid == _TRUE_) && (((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_nid)) || ((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_nid)))) { one_correlation = ppm->c_ad_nid; one_tilt = ppm->n_ad_nid; one_running = ppm->alpha_ad_nid; } if ((ppt->has_ad == _TRUE_) && (ppt->has_niv == _TRUE_) && (((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_niv)) || ((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_niv)))) { one_correlation = ppm->c_ad_niv; one_tilt = ppm->n_ad_niv; one_running = ppm->alpha_ad_niv; } if ((ppt->has_bi == _TRUE_) && (ppt->has_cdi == _TRUE_) && (((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_cdi)) || ((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_cdi)))) { one_correlation = ppm->c_bi_cdi; one_tilt = ppm->n_bi_cdi; one_running = ppm->alpha_bi_cdi; } if ((ppt->has_bi == _TRUE_) && (ppt->has_nid == _TRUE_) && (((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_nid)) || ((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_nid)))) { one_correlation = ppm->c_bi_nid; one_tilt = ppm->n_bi_nid; one_running = ppm->alpha_bi_nid; } if ((ppt->has_bi == _TRUE_) && (ppt->has_niv == _TRUE_) && (((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_niv)) || 
((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_niv)))) { one_correlation = ppm->c_bi_niv; one_tilt = ppm->n_bi_niv; one_running = ppm->alpha_bi_niv; } if ((ppt->has_cdi == _TRUE_) && (ppt->has_nid == _TRUE_) && (((index_ic1 == ppt->index_ic_cdi) && (index_ic2 == ppt->index_ic_nid)) || ((index_ic2 == ppt->index_ic_cdi) && (index_ic1 == ppt->index_ic_nid)))) { one_correlation = ppm->c_cdi_nid; one_tilt = ppm->n_cdi_nid; one_running = ppm->alpha_cdi_nid; } if ((ppt->has_cdi == _TRUE_) && (ppt->has_niv == _TRUE_) && (((index_ic1 == ppt->index_ic_cdi) && (index_ic2 == ppt->index_ic_niv)) || ((index_ic2 == ppt->index_ic_cdi) && (index_ic1 == ppt->index_ic_niv)))) { one_correlation = ppm->c_cdi_niv; one_tilt = ppm->n_cdi_niv; one_running = ppm->alpha_cdi_niv; } if ((ppt->has_nid == _TRUE_) && (ppt->has_niv == _TRUE_) && (((index_ic1 == ppt->index_ic_nid) && (index_ic2 == ppt->index_ic_niv)) || ((index_ic2 == ppt->index_ic_nid) && (index_ic1 == ppt->index_ic_niv)))) { one_correlation = ppm->c_nid_niv; one_tilt = ppm->n_nid_niv; one_running = ppm->alpha_nid_niv; } } class_test((one_correlation < -1) || (one_correlation > 1), ppm->error_message, "inconsistent input for isocurvature cross-correlation\n"); index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]); index_ic1_ic1 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]); index_ic2_ic2 = index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md]); if (one_correlation == 0.) 
{ ppm->is_non_zero[index_md][index_ic1_ic2] = _FALSE_; ppm->amplitude[index_md][index_ic1_ic2] = 0.; ppm->tilt[index_md][index_ic1_ic2] = 0.; ppm->running[index_md][index_ic1_ic2] = 0.; } else { ppm->is_non_zero[index_md][index_ic1_ic2] = _TRUE_; ppm->amplitude[index_md][index_ic1_ic2] = sqrt(ppm->amplitude[index_md][index_ic1_ic1]* ppm->amplitude[index_md][index_ic2_ic2])* one_correlation; ppm->tilt[index_md][index_ic1_ic2] = 0.5*(ppm->tilt[index_md][index_ic1_ic1] +ppm->tilt[index_md][index_ic2_ic2]) + one_tilt; ppm->running[index_md][index_ic1_ic2] = 0.5*(ppm->running[index_md][index_ic1_ic1] +ppm->running[index_md][index_ic2_ic2]) + one_running; } } } } return _SUCCESS_; } /** * This routine returns the primordial spectrum in the simple analytic case with * amplitudes, tilts, runnings, for each mode (scalar/tensor...), * pair of initial conditions, and wavenumber. * * @param ppm Input/output: pointer to primordial structure * @param index_md Input: index of mode (scalar, tensor, ...) * @param index_ic1_ic2 Input: pair of initial conditions (ic1, ic2) * @param k Input: wavenumber in same units as pivot scale, i.e. in 1/Mpc * @param pk Output: primordial power spectrum A (k/k_pivot)^(n+...) 
* @return the error status
 */

int primordial_analytic_spectrum(
                                 struct primordial * ppm,
                                 int index_md,
                                 int index_ic1_ic2,
                                 double k,
                                 double * pk
                                 ) {

  if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

    /* P(k) = A exp[(n-1) ln(k/k_pivot) + (alpha/2) ln^2(k/k_pivot)] */
    *pk = ppm->amplitude[index_md][index_ic1_ic2]
      *exp((ppm->tilt[index_md][index_ic1_ic2]-1.)*log(k/ppm->k_pivot)
           + 0.5 * ppm->running[index_md][index_ic1_ic2] * pow(log(k/ppm->k_pivot), 2.));
  }
  else {
    *pk = 0.;
  }

  return _SUCCESS_;

}

/**
 * This routine encodes the inflaton scalar potential
 *
 * @param ppm Input: pointer to primordial structure
 * @param phi Input: background inflaton field value in units of Mp
 * @param V   Output: inflaton potential in units of \f$ Mp^4\f$
 * @param dV  Output: first derivative of inflaton potential wrt the field
 * @param ddV Output: second derivative of inflaton potential wrt the field
 * @return the error status
 */

int primordial_inflation_potential(
                                   struct primordial * ppm,
                                   double phi,
                                   double * V,
                                   double * dV,
                                   double * ddV
                                   ) {

  double e,de,dde,mu,dmu,ddmu,l,dl,ddl,p,dp,ddp;

  switch (ppm->potential) {

    /* V(phi) = polynomial in phi (Taylor coefficients V0..V4) */
  case polynomial:

    *V = ppm->V0+phi*ppm->V1+pow(phi,2)/2.*ppm->V2+pow(phi,3)/6.*ppm->V3+pow(phi,4)/24.*ppm->V4;
    *dV = ppm->V1+phi*ppm->V2+pow(phi,2)/2.*ppm->V3+pow(phi,3)/6.*ppm->V4;
    *ddV = ppm->V2+phi*ppm->V3+pow(phi,2)/2.*ppm->V4;
    break;

    /* V(phi) = Lambda^4(1+cos(phi/f)) = V0 (1+cos(phi/V1)) */
  case natural:

    *V = ppm->V0*(1.+cos(phi/ppm->V1));
    *dV = -ppm->V0/ppm->V1*sin(phi/ppm->V1);
    *ddV = -ppm->V0/ppm->V1/ppm->V1*cos(phi/ppm->V1);
    break;

    /* Higgs inflation from arXiv:1403.6078 */
  case higgs_inflation:

    // correspondence with 1403.6078:
    // V0 = b
    // V1 = ksi
    // V2 = kappa
    // V3 = delta_lambda
    // mu = bar(mu)/M_P
    // phi = -chi/M_P

    /* e = exp(2 chi / sqrt(6) M_P) in reduced units; de, dde are its
       first/second derivatives wrt phi */
    e = exp(2./sqrt(6.)*sqrt(8.*_PI_)*phi);
    de = 2./sqrt(6.)*sqrt(8.*_PI_)*e;
    dde = 2./3. * 8.*_PI_ * e;

    mu = pow(1.-e,0.5);
    dmu = -0.5*de*pow(1.-e,-0.5);
    ddmu = -0.5*dde*pow(1.-e,-0.5)-0.25*de*de*pow(1.-e,-1.5);

    l = log(mu/ppm->V2);
    dl = dmu/mu;
    ddl = ddmu/mu - dl*dl;

    p = 1./16. + ppm->V3/ppm->V0 + l*l;
    dp = 2.*dl*l;
    ddp = 2.*ddl*l+2.*dl*dl;

    *V = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*p*pow(mu,4);
    *dV = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*(dp*pow(mu,4)+4.*p*dmu*pow(mu,3));
    *ddV = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*(ddp*pow(mu,4)+8.*dp*dmu*pow(mu,3)+4.*p*ddmu*pow(mu,3)+12.*p*pow(dmu*mu,2));

    //fprintf(stderr,"%e %e %e\n",*V,p,mu);

    break;

    /* code here other shapes */

  default:
    class_stop(ppm->error_message,"ppm->potential=%d different from all known cases",ppm->potential);
    break;
  }

  return _SUCCESS_;

}

/**
 * This routine encodes the function \f$ H(\phi)\f$ as a 4th-order Taylor
 * expansion with coefficients H0..H4, plus its first three derivatives.
 *
 * @param ppm  Input: pointer to primordial structure
 * @param phi  Input: background inflaton field value in units of Mp
 * @param H    Output: Hubble parameter in units of Mp
 * @param dH   Output: \f$ dH / d\phi \f$
 * @param ddH  Output: \f$ d^2H / d\phi^2 \f$
 * @param dddH Output: \f$ d^3H / d\phi^3 \f$
 * @return the error status
 */

int primordial_inflation_hubble(
                                struct primordial * ppm,
                                double phi,
                                double * H,
                                double * dH,
                                double * ddH,
                                double * dddH
                                ) {

  *H = ppm->H0 + phi*ppm->H1 + pow(phi,2)/2.*ppm->H2 + pow(phi,3)/6.*ppm->H3 + pow(phi,4)/24.*ppm->H4;
  *dH = ppm->H1 + phi*ppm->H2 + pow(phi,2)/2.*ppm->H3 + pow(phi,3)/6.*ppm->H4;
  *ddH = ppm->H2 + phi*ppm->H3 + pow(phi,2)/2.*ppm->H4;
  *dddH = ppm->H3 + phi*ppm->H4;

  return _SUCCESS_;

}

/**
 * This routine defines indices used by the inflation simulator
 *
 * @param ppm Input/output: pointer to primordial structure
 * @return the error status
 */

int primordial_inflation_indices(
                                 struct primordial * ppm
                                 ) {

  int index_in;

  index_in = 0;

  /* indices for background quantities */
  ppm->index_in_a = index_in;
  index_in ++;
  ppm->index_in_phi = index_in;
  index_in ++;

  /* dphi is evolved only for potential-based models (inflation_H needs
     only a first-order equation) */
  if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
    ppm->index_in_dphi = index_in;
    index_in ++;
  }

  /* size of background vector */
  ppm->in_bg_size = index_in;

  /* indices for perturbations (real/imaginary parts of scalar mode
     function ksi and tensor mode function ah, plus their derivatives) */
  ppm->index_in_ksi_re = index_in;
  index_in ++;
  ppm->index_in_ksi_im = index_in;
  index_in ++;
  ppm->index_in_dksi_re = index_in;
  index_in ++;
  ppm->index_in_dksi_im = index_in;
  index_in ++;
  ppm->index_in_ah_re = index_in;
  index_in ++;
  ppm->index_in_ah_im = index_in;
  index_in ++;
  ppm->index_in_dah_re = index_in;
  index_in ++;
  ppm->index_in_dah_im = index_in;
  index_in ++;

  /* size of perturbation vector */
  ppm->in_size = index_in;

  return _SUCCESS_;

}

/**
 * Main routine of inflation simulator. Its goal is to check the
 * background evolution before and after the pivot value
 * phi=phi_pivot, and then, if this evolution is suitable, to call the
 * routine primordial_inflation_spectra().
 *
 * @param ppt Input: pointer to perturbation structure
 * @param ppm Input/output: pointer to primordial structure
 * @param ppr Input: pointer to precision structure
 * @return the error status
 */

int primordial_inflation_solve_inflation(
                                         struct perturbs * ppt,
                                         struct primordial * ppm,
                                         struct precision *ppr
                                         ) {

  /** Summary: */

  /** - define local variables */
  double * y;
  double * y_ini;
  double * dy;
  double a_pivot;
  double a_try;
  double H_pivot;
  double H_try;
  double phi_try;
  double dphidt_pivot;
  double dphidt_try;
  double aH_ini,aH_end;
  double k_max,k_min;
  int counter;
  double dH,ddH,dddH;

  /** - allocate vectors for background/perturbed quantities */
  class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
  class_alloc(y_ini,ppm->in_size*sizeof(double),ppm->error_message);
  class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);

  /** - eventually, needs first to find phi_pivot */
  if (ppm->primordial_spec_type == inflation_V_end) {
    class_call(primordial_inflation_find_phi_pivot(ppm,ppr,y,dy),
               ppm->error_message,
               ppm->error_message);
  }
  else {
    ppm->phi_pivot = 0.;
  }

  // uncomment these lines if for checking, you want first-order slow-roll predictions
  /*
    if (ppm->primordial_verbose>0) {
      if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
        double V,dV,ddV;
        class_call(primordial_inflation_check_potential(ppm,ppm->phi_pivot,&V,&dV,&ddV),
                   ppm->error_message,
                   ppm->error_message);
        fprintf(stdout," -> 1st-order slow-roll prediction for A_s: %g\n",128.*_PI_/3.*pow(V,3)/pow(dV,2));
        fprintf(stdout," -> 1st-order slow-roll prediction for T/S: %g\n",pow(dV/V,2)/_PI_);
        fprintf(stdout," -> 1st-order slow-roll prediction for A_T: %g\n",pow(dV/V,2)/_PI_*128.*_PI_/3.*pow(V,3)/pow(dV,2));
        fprintf(stdout," -> 1st-order slow-roll prediction for n_s: %g\n",1.-6./16./_PI_*pow(dV/V,2)+2./8./_PI_*(ddV/V));
        fprintf(stdout," -> 1st-order slow-roll prediction for n_t: %g\n",-2./16./_PI_*pow(dV/V,2));
      }
    }
  */

  /** - compute H_pivot at phi_pivot */
  switch (ppm->primordial_spec_type) {

  case inflation_V:
  case inflation_V_end:

    /* check positivity and negative slope of potential in field pivot
       value, and find value of phi_dot and H for field's pivot value,
       assuming slow-roll attractor solution has been reached. If no
       solution, code will stop there. */
    if (ppm->primordial_verbose > 1)
      printf(" (search attractor at pivot)\n");

    class_call_except(primordial_inflation_find_attractor(ppm,
                                                          ppr,
                                                          ppm->phi_pivot,
                                                          ppr->primordial_inflation_attractor_precision_pivot,
                                                          y,
                                                          dy,
                                                          &H_pivot,
                                                          &dphidt_pivot),
                      ppm->error_message,
                      ppm->error_message,
                      free(y);free(y_ini);free(dy));
    break;

  case inflation_H:

    /* check positivity and negative slope of H(phi) in field pivot
       value, and get H_pivot */
    class_call_except(primordial_inflation_check_hubble(ppm,
                                                        ppm->phi_pivot,
                                                        &H_pivot,
                                                        &dH,
                                                        &ddH,
                                                        &dddH),
                      ppm->error_message,
                      ppm->error_message,
                      free(y);free(y_ini);free(dy));
    break;

  default:
    free(y);free(y_ini);free(dy);
    class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
    break;
  }

  /** - find a_pivot, value of scale factor when k_pivot crosses horizon while phi=phi_pivot */
  a_pivot = ppm->k_pivot/H_pivot;

  /** - integrate background solution starting from phi_pivot and until
      k_max>>aH. This ensures that the inflationary model considered
      here is valid and that the primordial spectrum can be
      computed. Otherwise, if slow-roll brakes too early, model is not
      suitable and run stops. */
  if (ppm->primordial_verbose > 1)
    printf(" (check inflation duration after phi_pivot=%e)\n",ppm->phi_pivot);

  k_max = exp(ppm->lnk[ppm->lnk_size-1]);
  aH_end = k_max/ppr->primordial_inflation_ratio_max;
  y[ppm->index_in_a] = a_pivot;
  y[ppm->index_in_phi] = ppm->phi_pivot;
  if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
    y[ppm->index_in_dphi] = a_pivot*dphidt_pivot;

  class_call_except(primordial_inflation_evolve_background(ppm,
                                                           ppr,
                                                           y,
                                                           dy,
                                                           _aH_,
                                                           aH_end,
                                                           _TRUE_,
                                                           forward,
                                                           conformal),
                    ppm->error_message,
                    ppm->error_message,
                    free(y);free(y_ini);free(dy));

  /* we need to do the opposite: to check that there is an initial time
     such that k_min << (aH)_ini. A guess is made by integrating
     backward in time. This can be done exactly for inflation_H, or
     only approximately for inflation_V (using the first-order
     approximation to the attractor inflationary solution). However
     this approximation is irrelevant because nevertheless, later on,
     we compute the attractor solution at the initial time with high
     accuracy, and then we integrate the background equations forward
     in time. Hence the approximation made here introduces zero mistake
     on the final result. It is just a way to find quickly a reasonable
     initial phi value. In the inflation_V case, if the exact forward
     integration reveals that the guess was not good (i.e. does not
     correspond to "early enough"), we iterate over sequences of
     backward/forward integration, until a correct time is found. For
     potential such that no solution exists (no long-enough slow-roll
     period before the pivot scale), the run stops. */

  if (ppm->primordial_verbose > 1)
    printf(" (check inflation duration before pivot)\n");

  k_min = exp(ppm->lnk[0]);
  aH_ini = k_min/ppr->primordial_inflation_ratio_min;

  switch (ppm->primordial_spec_type) {

  case inflation_V:
  case inflation_V_end:

    counter = 0;
    y[ppm->index_in_a] = a_pivot;
    y[ppm->index_in_phi] = ppm->phi_pivot;

    do {

      /* counter to avoid infinite loop */
      counter ++;
      class_test_except(counter >= ppr->primordial_inflation_phi_ini_maxit,
                        ppm->error_message,
                        free(y);free(y_ini);free(dy),
                        "when searching for an initial value of phi just before observable inflation takes place, could not converge after %d iterations. The potential does not allow eough inflationary e-folds before reaching the pivot scale",
                        counter);

      /* try to find a value phi_try such that
         aH=aH_ini*(ppr->primordial_inflation_aH_ini_target) (default:
         aH_ini*0.9). But this is using the approximate backward
         solution. So, anyway, we will check using the exact forward
         solution that at this phi_try, we really have aH < aH_ini; if
         this is not the case, we will iterate until a correct phi_try
         is found. */
      class_call_except(primordial_inflation_evolve_background(ppm,
                                                               ppr,
                                                               y,
                                                               dy,
                                                               _aH_,
                                                               aH_ini*ppr->primordial_inflation_aH_ini_target,
                                                               _TRUE_,
                                                               backward,
                                                               conformal),
                        ppm->error_message,
                        ppm->error_message,
                        free(y);free(y_ini);free(dy));

      phi_try = y[ppm->index_in_phi];

      /* in inflation_V case, find the accurate attractor solution for
         phi_ini', and then the correct value of a_ini, and finally of
         dphi/dtau_ini */

      /* find dphi/dt_ini (unlike dphi/dtau_ini, this does not depend on
         normalization of a) */
      class_call_except(primordial_inflation_find_attractor(ppm,
                                                            ppr,
                                                            phi_try,
                                                            ppr->primordial_inflation_attractor_precision_initial,
                                                            y,
                                                            dy,
                                                            &H_try,
                                                            &dphidt_try),
                        ppm->error_message,
                        ppm->error_message,
                        free(y);free(y_ini);free(dy));

      /* we need to normalize a properly so that a=a_pivot when
         phi=phi_pivot. To do so, we evolve starting arbitrarily from
         a_ini=1, and then we rescale a_ini appropriately. */
      y[ppm->index_in_a] = 1.;
      y[ppm->index_in_phi] = phi_try;
      y[ppm->index_in_dphi] = y[ppm->index_in_a]*dphidt_try; // dphi/dtau = a dphi/dt

      class_call_except(primordial_inflation_evolve_background(ppm,
                                                               ppr,
                                                               y,
                                                               dy,
                                                               _phi_,
                                                               ppm->phi_pivot,
                                                               _TRUE_,
                                                               forward,
                                                               conformal),
                        ppm->error_message,
                        ppm->error_message,
                        free(y);free(y_ini);free(dy));

      /* now impose the correct a_ini */
      a_try = a_pivot/y[ppm->index_in_a];

      /* in case another iteration will be needed, set a new starting
         point for the routine
         primordial_inflation_evolve_background(...,backward) */
      y[ppm->index_in_a] = a_try;
      y[ppm->index_in_phi] = phi_try;

    } while (a_try*H_try > aH_ini);

    y_ini[ppm->index_in_a] = a_try;
    y_ini[ppm->index_in_phi] = phi_try;
    y_ini[ppm->index_in_dphi] = y_ini[ppm->index_in_a]*dphidt_try; // dphi/dtau = a dphi/dt

    break;

  case inflation_H:

    /* first-order equation: backward integration is exact, no
       iteration needed */
    y[ppm->index_in_a] = a_pivot;
    y[ppm->index_in_phi] = ppm->phi_pivot;

    class_call_except(primordial_inflation_evolve_background(ppm,
                                                             ppr,
                                                             y,
                                                             dy,
                                                             _aH_,
                                                             aH_ini,
                                                             _TRUE_,
                                                             backward,
                                                             conformal),
                      ppm->error_message,
                      ppm->error_message,
                      free(y);free(y_ini);free(dy));

    y_ini[ppm->index_in_a] = y[ppm->index_in_a];
    y_ini[ppm->index_in_phi] = y[ppm->index_in_phi];

    break;

  default:
    free(y);free(y_ini);free(dy);
    class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
    break;
  }

  /** - starting from this time, i.e. from y_ini[ ], we run the routine
      which takes care of computing the primordial spectrum. */
  if (ppm->primordial_verbose > 1)
    printf(" (compute spectrum)\n");

  if (ppm->behavior == numerical) {

    class_call_except(primordial_inflation_spectra(ppt,
                                                   ppm,
                                                   ppr,
                                                   y_ini),
                      ppm->error_message,
                      ppm->error_message,
                      free(y);free(y_ini);free(dy));
  }
  else if (ppm->behavior == analytical) {

    class_call_except(primordial_inflation_analytic_spectra(ppt,
                                                            ppm,
                                                            ppr,
                                                            y_ini),
                      ppm->error_message,
                      ppm->error_message,
                      free(y);free(y_ini);free(dy));
  }
  else {
    class_stop(ppm->error_message,"Uncomprehensible value of the flag ppm->behavior=%d",ppm->behavior);
  }

  /** - before ending, we want to compute and store the values of \f$ \phi \f$
      corresponding to k=aH for k_min and k_max */

  y[ppm->index_in_a] = y_ini[ppm->index_in_a];
  y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
  if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
    y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];

  class_call_except(primordial_inflation_evolve_background(ppm,
                                                           ppr,
                                                           y,
                                                           dy,
                                                           _aH_,
                                                           k_min,
                                                           _FALSE_,
                                                           forward,
                                                           conformal),
                    ppm->error_message,
                    ppm->error_message,
                    free(y);free(y_ini);free(dy));

  ppm->phi_min=y[ppm->index_in_phi];

  class_call_except(primordial_inflation_evolve_background(ppm,
                                                           ppr,
                                                           y,
                                                           dy,
                                                           _aH_,
                                                           k_max,
                                                           _FALSE_,
                                                           forward,
                                                           conformal),
                    ppm->error_message,
                    ppm->error_message,
                    free(y);free(y_ini);free(dy));

  ppm->phi_max=y[ppm->index_in_phi];

  if (ppm->primordial_verbose > 1)
    printf(" (observable power spectrum goes from %e to %e)\n",
           ppm->phi_min,
           ppm->phi_max);

  /** - finally, we can de-allocate */
  free(y);
  free(y_ini);
  free(dy);

  return _SUCCESS_;

}

/**
 * Routine for the computation of an analytic approximation to the
 * primordial spectrum. In general, should be used only for
 * comparing with exact numerical computation performed by
 * primordial_inflation_spectra().
* * @param ppt Input: pointer to perturbation structure * @param ppm Input/output: pointer to primordial structure * @param ppr Input: pointer to precision structure * @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled * @return the error status */ int primordial_inflation_analytic_spectra( struct perturbs * ppt, struct primordial * ppm, struct precision * ppr, double * y_ini ) { double * y; double * dy; int index_k; double k,phi_k; double curvature,tensors; double V,dV,ddV; /** Summary */ /** - allocate vectors for background/perturbed quantities */ class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message); class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message); /** - initialize the background part of the running vector */ y[ppm->index_in_a] = y_ini[ppm->index_in_a]; y[ppm->index_in_phi] = y_ini[ppm->index_in_phi]; if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi]; /** - loop over Fourier wavenumbers */ for (index_k=0; index_k < ppm->lnk_size; index_k++) { k = exp(ppm->lnk[index_k]); /* evolve background until k=aH is reached */ class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _aH_, k, _FALSE_, forward, conformal), ppm->error_message, ppm->error_message); /** - read value of phi at time when k=aH */ phi_k = y[ppm->index_in_phi]; /** - get potential (and its derivatives) at this value */ class_call(primordial_inflation_check_potential(ppm,phi_k,&V,&dV,&ddV), ppm->error_message, ppm->error_message); /** - calculate the analytic slow-roll formula for the spectra */ curvature = 128.*_PI_/3.*pow(V,3)/pow(dV,2); tensors = pow(dV/V,2)/_PI_*128.*_PI_/3.*pow(V,3)/pow(dV,2); /** - store the obtained result for curvature and tensor perturbations */ ppm->lnpk[ppt->index_md_scalars][index_k] = log(curvature); ppm->lnpk[ppt->index_md_tensors][index_k] = log(tensors); } 
ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_; ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_; return _SUCCESS_; } /** * Routine with a loop over wavenumbers for the computation of the primordial * spectrum. For each wavenumber it calls primordial_inflation_one_wavenumber() * * @param ppt Input: pointer to perturbation structure * @param ppm Input/output: pointer to primordial structure * @param ppr Input: pointer to precision structure * @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled * @return the error status */ int primordial_inflation_spectra( struct perturbs * ppt, struct primordial * ppm, struct precision * ppr, double * y_ini ) { int index_k; /* number of threads (always one if no openmp) */ int number_of_threads=1; /* index of the thread (always 0 if no openmp) */ int thread=0; /* This code can be optionally compiled with the openmp option for parallel computation. Inside parallel regions, the use of the command "return" is forbidden. For error management, instead of "return _FAILURE_", we will set the variable below to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the parallel region. 
*/ int abort; #ifdef _OPENMP /* instrumentation times */ double tstart, tstop, tspent; #endif #ifdef _OPENMP #pragma omp parallel { number_of_threads = omp_get_num_threads(); } #endif abort = _FALSE_; #pragma omp parallel shared(ppt,ppm,ppr,abort,y_ini) private(index_k,thread,tspent,tstart,tstop) num_threads(number_of_threads) { #ifdef _OPENMP thread = omp_get_thread_num(); tspent=0.; #endif #pragma omp for schedule (dynamic) /* loop over Fourier wavenumbers */ for (index_k=0; index_k < ppm->lnk_size; index_k++) { #ifdef _OPENMP tstart = omp_get_wtime(); #endif class_call_parallel(primordial_inflation_one_wavenumber(ppt,ppm,ppr,y_ini,index_k), ppm->error_message, ppm->error_message); #ifdef _OPENMP tstop = omp_get_wtime(); tspent += tstop-tstart; #endif } #ifdef _OPENMP if (ppm->primordial_verbose>1) printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n", __func__,tspent,thread); #endif } /* end of parallel zone */ if (abort == _TRUE_) return _FAILURE_; ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_; ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_; return _SUCCESS_; } /** * Routine coordinating the computation of the primordial * spectrum for one wavenumber. It calls primordial_inflation_one_k() to * integrate the perturbation equations, and then it stores the result * for the scalar/tensor spectra. 
*
 * @param ppt     Input: pointer to perturbation structure
 * @param ppm     Input/output: pointer to primordial structure
 * @param ppr     Input: pointer to precision structure
 * @param y_ini   Input: initial conditions for the vector of background/perturbations, already allocated and filled
 * @param index_k Input: index of wavenumber to be considered
 * @return the error status
 */

int primordial_inflation_one_wavenumber(
                                        struct perturbs * ppt,
                                        struct primordial * ppm,
                                        struct precision * ppr,
                                        double * y_ini,
                                        int index_k
                                        ) {

  double k;
  double curvature,tensors;
  double * y;
  double * dy;

  k = exp(ppm->lnk[index_k]);

  /** Summary */

  /** - allocate vectors for background/perturbed quantities */
  class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
  class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);

  /** - initialize the background part of the running vector */
  y[ppm->index_in_a] = y_ini[ppm->index_in_a];
  y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
  if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
    y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];

  /** - evolve the background until the relevant initial time for
      integrating perturbations (when the mode is deep inside the
      horizon, k >> aH by a factor 1/ratio_min) */
  class_call(primordial_inflation_evolve_background(ppm,
                                                    ppr,
                                                    y,
                                                    dy,
                                                    _aH_,
                                                    k/ppr->primordial_inflation_ratio_min,
                                                    _FALSE_,
                                                    forward,
                                                    conformal),
             ppm->error_message,
             ppm->error_message);

  /** - evolve the background/perturbation equations from this time and
      until some time after Horizon crossing */
  class_call(primordial_inflation_one_k(ppm,
                                        ppr,
                                        k,
                                        y,
                                        dy,
                                        &curvature,
                                        &tensors),
             ppm->error_message,
             ppm->error_message);

  free(y);
  free(dy);

  class_test(curvature<=0.,
             ppm->error_message,
             "negative curvature spectrum");

  class_test(tensors<=0.,
             ppm->error_message,
             "negative tensor spectrum");

  /** - store the obtained result for curvature and tensor perturbations */
  ppm->lnpk[ppt->index_md_scalars][index_k] = log(curvature);
  ppm->lnpk[ppt->index_md_tensors][index_k] = log(tensors);

  /* uncomment if you want to print here the spectra for testing */
  /*
    fprintf(stderr,"%e %e %e\n",
    ppm->lnk[index_k],
    ppm->lnpk[ppt->index_md_scalars][index_k],
    ppm->lnpk[ppt->index_md_tensors][index_k]);
  */

  return _SUCCESS_;

}

/**
 * Routine integrating the background plus perturbation equations for
 * each wavenumber, and returning the scalar and tensor spectrum.
 *
 * @param ppm       Input: pointer to primordial structure
 * @param ppr       Input: pointer to precision structure
 * @param k         Input: Fourier wavenumber
 * @param y         Input: running vector of background/perturbations, already allocated and initialized
 * @param dy        Input: running vector of background/perturbation derivatives, already allocated
 * @param curvature Output: curvature perturbation
 * @param tensor    Output: tensor perturbation
 * @return the error status
 */

int primordial_inflation_one_k(
                               struct primordial * ppm,
                               struct precision * ppr,
                               double k,
                               double * y,
                               double * dy,
                               double * curvature,
                               double * tensor
                               ) {

  /** Summary: */

  /** - define local variables */
  double tau_start,tau_end,dtau;
  double z,ksi2,ah2;
  double aH;
  double curvature_old;
  double curvature_new;
  double dlnPdN;

  struct primordial_inflation_parameters_and_workspace pipaw;
  struct generic_integrator_workspace gi;

  /** - initialize the generic integrator (same integrator already used
      in background, thermodynamics and perturbation modules) */
  pipaw.ppm = ppm;
  pipaw.N = ppm->in_size;
  pipaw.integrate = forward;
  pipaw.time = conformal;
  pipaw.k = k;

  class_call(initialize_generic_integrator(pipaw.N,&gi),
             gi.error_message,
             ppm->error_message);

  /* initial conditions for the perturbations, Bunch-Davies vacuum */
  y[ppm->index_in_ksi_re]=1./sqrt(2.*k);
  y[ppm->index_in_ksi_im]=0.;
  y[ppm->index_in_dksi_re]=0.;
  y[ppm->index_in_dksi_im]=-k*y[ppm->index_in_ksi_re];

  y[ppm->index_in_ah_re]=1./sqrt(2.*k);
  y[ppm->index_in_ah_im]=0.;
  y[ppm->index_in_dah_re]=0.;
  y[ppm->index_in_dah_im]=-k*y[ppm->index_in_ah_re];

  /** - initialize variable used for deciding when to stop the
      calculation (= when the curvature remains stable) */
  curvature_new = _HUGE_;

  /** - initialize conformal time to arbitrary value (here, only
      variations of tau matter: the equations that we integrate do not
      depend explicitly on time) */
  tau_end = 0;

  /** - compute derivative of initial vector and infer first value of
      adaptive time-step */
  class_call(primordial_inflation_derivs(tau_end,
                                         y,
                                         dy,
                                         &pipaw,
                                         ppm->error_message),
             ppm->error_message,
             ppm->error_message);

  dtau = ppr->primordial_inflation_pt_stepsize*2.*_PI_
    /MAX(sqrt(fabs(dy[ppm->index_in_dksi_re]/y[ppm->index_in_ksi_re])),k);

  /** - loop over time */
  do {

    /* new time interval [tau_start, tau_end] over which equations will
       be integrated */
    tau_start = tau_end;

    tau_end = tau_start + dtau;

    class_test(dtau/tau_start < ppr->smallest_allowed_variation,
               ppm->error_message,
               "integration step: relative change in time =%e < machine precision : leads either to numerical error or infinite loop",dtau/tau_start);

    /* evolve the system */
    class_call(generic_integrator(primordial_inflation_derivs,
                                  tau_start,
                                  tau_end,
                                  y,
                                  &pipaw,
                                  ppr->primordial_inflation_tol_integration,
                                  ppr->smallest_allowed_variation,
                                  &gi),
               gi.error_message,
               ppm->error_message);

    /* compute derivatives at tau_end, useful to infer new time step and
       spectra */
    class_call(primordial_inflation_derivs(tau_end,
                                           y,
                                           dy,
                                           &pipaw,
                                           ppm->error_message),
               ppm->error_message,
               ppm->error_message);

    /* new time step */
    dtau = ppr->primordial_inflation_pt_stepsize*2.*_PI_
      /MAX(sqrt(fabs(dy[ppm->index_in_dksi_re]/y[ppm->index_in_ksi_re])),k);

    /* new aH */
    aH = dy[ppm->index_in_a]/y[ppm->index_in_a];

    /* store previous value of curvature (at tau_start) */
    curvature_old = curvature_new;

    /* new curvature */
    z = y[ppm->index_in_a]*dy[ppm->index_in_phi]/aH;
    ksi2 = y[ppm->index_in_ksi_re]*y[ppm->index_in_ksi_re]+y[ppm->index_in_ksi_im]*y[ppm->index_in_ksi_im];
    curvature_new = k*k*k/2./_PI_/_PI_*ksi2/z/z;

    /* variation of curvature with time (dimensionless) */
    dlnPdN = (curvature_new-curvature_old)/dtau*y[ppm->index_in_a]/dy[ppm->index_in_a]/curvature_new;

    /* stop when (k >> aH) AND curvature is stable */
  } while ((k/aH >= ppr->primordial_inflation_ratio_max) || (fabs(dlnPdN) > ppr->primordial_inflation_tol_curvature));

  /** - clean the generic integrator */
  class_call(cleanup_generic_integrator(&gi),
             gi.error_message,
             ppm->error_message);

  /** - store final value of curvature for this wavenumber */
  *curvature = curvature_new;

  /** - store final value of tensor perturbation for this wavenumber */
  ah2 = y[ppm->index_in_ah_re]*y[ppm->index_in_ah_re]+y[ppm->index_in_ah_im]*y[ppm->index_in_ah_im];
  *tensor = 32.*k*k*k/_PI_*ah2/y[ppm->index_in_a]/y[ppm->index_in_a];

  //fprintf(stdout,"%g %g %g %g %g\n",k,*curvature,*tensor,*tensor/(*curvature),dlnPdN);

  return _SUCCESS_;

}

/**
 * Routine searching for the inflationary attractor solution at a
 * given phi_0, by iterations, with a given tolerance. If no solution
 * found within tolerance, returns error message. The principle is the
 * following. The code starts integrating the background equations
 * from various values of phi, corresponding to earlier and earlier
 * value before phi_0, and separated by a small arbitrary step size,
 * corresponding roughly to 1 e-fold of inflation. Each time, the
 * integration starts with the initial condition \f$ \phi=-V'/3H\f$ (slow-roll
 * prediction). If the found value of \f$\phi'\f$ in phi_0 is stable (up to
 * the parameter "precision"), the code considers that there is an
 * attractor, and stops iterating. If this process does not converge,
 * it returns an error message.
*
 * @param ppm       Input: pointer to primordial structure
 * @param ppr       Input: pointer to precision structure
 * @param phi_0     Input: field value at which we wish to find the solution
 * @param precision Input: tolerance on output values (if too large, an attractor will always be considered to be found)
 * @param y         Input: running vector of background variables, already allocated and initialized
 * @param dy        Input: running vector of background derivatives, already allocated
 * @param H_0       Output: Hubble value at phi_0 for attractor solution
 * @param dphidt_0  Output: field derivative value at phi_0 for attractor solution
 * @return the error status
 */

int primordial_inflation_find_attractor(
                                        struct primordial * ppm,
                                        struct precision * ppr,
                                        double phi_0,
                                        double precision,
                                        double * y,
                                        double * dy,
                                        double * H_0,
                                        double * dphidt_0
                                        ) {

  double V_0,dV_0,ddV_0;
  double V=0.,dV=0.,ddV=0.;
  double a;
  double dphidt,dphidt_0new,dphidt_0old,phi;
  int counter;

  /* we want a series of values of phi' in phi_0, obtained by
     integrating the system from earlier and earlier time. The first
     value of the series is the slow-roll prediction phi' = -V'/3H. The
     following lines compute this value and initialize relevant
     quantities. */
  class_call(primordial_inflation_check_potential(ppm,phi_0,&V_0,&dV_0,&ddV_0),
             ppm->error_message,
             ppm->error_message);

  dphidt_0new = -dV_0/3./sqrt((8.*_PI_/3.)*V_0);
  phi = phi_0;
  counter = 0;

  dphidt_0old = dphidt_0new/(precision+2.); // this silly value just
                                            // ensures that the loop
                                            // below will be executed
                                            // at least once.

  /* loop over different values of phi, from which the background
     equations are integrated until phi_0 */
  while (fabs(dphidt_0new/dphidt_0old-1.) >= precision) {

    counter ++;
    class_test(counter >= ppr->primordial_inflation_attractor_maxit,
               ppm->error_message,
               "could not converge after %d iterations: there exists no attractor solution near phi=%g. Potential probably too steep in this region, or precision parameter primordial_inflation_attractor_precision=%g too small",
               counter,
               phi_0,
               precision);

    dphidt_0old = dphidt_0new;

    /* take one step in phi, corresponding roughly to adding one more
       e-fold of inflation */
    phi=phi+dV_0/V_0/16./_PI_;

    /* fix the initial phi' to the slow-roll prediction in that point,
       and initialize other relevant quantities */
    class_call(primordial_inflation_check_potential(ppm,phi,&V,&dV,&ddV),
               ppm->error_message,
               ppm->error_message);

    a = 1.;
    dphidt = -dV/3./sqrt((8.*_PI_/3.)*V);
    y[ppm->index_in_a]=a;
    y[ppm->index_in_phi]=phi;
    y[ppm->index_in_dphi]=a*dphidt;

    /* evolve the background equations until phi_0 is reached */
    class_call(primordial_inflation_evolve_background(ppm,
                                                      ppr,
                                                      y,
                                                      dy,
                                                      _phi_,
                                                      phi_0,
                                                      _TRUE_,
                                                      forward,
                                                      conformal),
               ppm->error_message,
               ppm->error_message);

    /* compute phi' in phi_0, this is the new point in the series whose
       convergence we want to check */
    dphidt_0new = y[ppm->index_in_dphi]/y[ppm->index_in_a];

  }

  /* if we have converged and found the attractor, we take the last
     value of phi' in phi_0 to be the correct one for the attractor
     solution */
  *dphidt_0 = dphidt_0new;
  *H_0 = sqrt((8.*_PI_/3.)*(0.5*dphidt_0new*dphidt_0new+V_0));

  if (ppm->primordial_verbose > 1) {
    printf(" (attractor found in phi=%g with phi'=%g, H=%g)\n",phi_0,*dphidt_0,*H_0);
  }

  return _SUCCESS_;

}

/**
 * Routine integrating background equations only, from initial values
 * stored in y, to a final value (if target = _aH_, until aH =
 * aH_stop; if target = _phi_, till phi = phi_stop; if target =
 * _end_inflation_, until \f$ d^2a/dt^2 = 0\f$ (here t = proper time)). In
 * output, y contains the final background values. In addition, if
 * check_epsilon is true, the routine controls at each step that the
 * expansion is accelerated and that inflation holds (epsilon>1),
 * otherwise it returns an error.
 * Thanks to the last argument, it is
 * also possible to specify whether the integration should be carried
 * forward or backward in time. For the inflation_H case, only a 1st
 * order differential equation is involved, so the forward and
 * backward case can be done exactly without problems. For the
 * inflation_V case, the equation of motion is 2nd order. What the
 * module will do in the backward case is to search for an approximate
 * solution, corresponding to the (first-order) attractor inflationary
 * solution. This approximate backward solution is used in order to
 * estimate some initial times, but the approximation made here will
 * never impact the final result: the module is written in such a way
 * that after using this approximation, the code always computes (and
 * relies on) the exact forward solution.
 *
 * @param ppm           Input: pointer to primordial structure
 * @param ppr           Input: pointer to precision structure
 * @param y             Input/output: running vector of background variables, already allocated and initialized
 * @param dy            Input: running vector of background derivatives, already allocated
 * @param target        Input: whether the goal is to reach a given aH or \f$ \phi \f$
 * @param stop          Input: the target value of either aH or \f$ \phi \f$
 * @param check_epsilon Input: whether we should impose inflation (epsilon<1) at each step
 * @param direction     Input: whether we should integrate forward or backward in time
 * @param time          Input: definition of time (proper or conformal)
 * @return the error status
 */

int primordial_inflation_evolve_background(
                                           struct primordial * ppm,
                                           struct precision * ppr,
                                           double * y,
                                           double * dy,
                                           enum target_quantity target,
                                           double stop,
                                           short check_epsilon,
                                           enum integration_direction direction,
                                           enum time_definition time
                                           ) {

  struct primordial_inflation_parameters_and_workspace pipaw;
  struct generic_integrator_workspace gi;
  double tau_start,tau_end,dtau=0.;
  double H,dH,ddH,dddH;
  double epsilon,epsilon_old;
  double quantity=0.;   // running estimate of the target quantity after the next step
  double V,dV,ddV;
  double sign_dtau=0.;  // +1 for forward integration, -1 for backward

  pipaw.ppm = ppm;
  pipaw.N = ppm->in_bg_size;
  if ((direction == backward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
    // -1 to remove the differential equation for phi', since we stick to the attractor
    pipaw.N -= 1;
  }
  pipaw.integrate = direction;
  pipaw.time = time;

  switch (direction) {
  case forward:
    sign_dtau = 1.;
    break;
  case backward:
    sign_dtau = -1.;
    break;
  }

  class_call(initialize_generic_integrator(pipaw.N,&gi),
             gi.error_message,
             ppm->error_message);

  /* at starting point, compute eventually epsilon */
  if (check_epsilon == _TRUE_) {
    class_call(primordial_inflation_get_epsilon(ppm,
                                                y[ppm->index_in_phi],
                                                &epsilon),
               ppm->error_message,
               ppm->error_message);
  }

  /* at starting point, compute the stepsize dtau */
  tau_end = 0;
  class_call(primordial_inflation_derivs(tau_end,
                                         y,
                                         dy,
                                         &pipaw,
                                         ppm->error_message),
             ppm->error_message,
             ppm->error_message);

  // compute timestep (if time = conformal, dtau is the conformal time step,
  // if time = proper, dtau is in fact dt, the proper time step)
  if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
    /* forward inflation_V case: limit the step by both the expansion rate and the field velocity */
    dtau = ppr->primordial_inflation_bg_stepsize
      *MIN(y[ppm->index_in_a]/dy[ppm->index_in_a],fabs(y[ppm->index_in_dphi]/dy[ppm->index_in_dphi]));
  }
  else {
    // minus sign for backward in time
    dtau = sign_dtau * ppr->primordial_inflation_bg_stepsize*y[ppm->index_in_a]/dy[ppm->index_in_a];
  }

  /* expected value of target quantity after the next step */
  switch (target) {

  case _aH_:
    // next (approximate) value of aH after next step
    // (a+[da/dx]*dx) H = aH (1 + [da/dx] / a dx)
    // where dtau can be conformal or proper time
    quantity = dy[ppm->index_in_a] * (1.+ dy[ppm->index_in_a]/y[ppm->index_in_a] * dtau);
    if (time == conformal) quantity /= y[ppm->index_in_a];
    break;

  case _phi_:
    // next (approximate) value of phi after next step
    quantity = y[ppm->index_in_phi]+dy[ppm->index_in_phi]*dtau;
    break;

  case _end_inflation_:
    // in this case, the goal is to reach d2a/dt2 = 0 (end of accelerated expansion)
    stop = 0.;
    // current value of quantity = - d2a/dt2 /a = [- (a'/a)^2 + 3/2 8pi/3 phi'^2]/a^2
    quantity = -pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2) + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi];
    if (time == conformal) quantity /= pow(y[ppm->index_in_a],2);
    // check that we are in the right case
    class_test(ppm->primordial_spec_type != inflation_V_end,
               ppm->error_message,
               "the target _end_inflation_ is only coded to work with inflation_V_end (but could be generalized if needed)");
    break;

  case _a_:
    // next (approximate) value of a after next step
    quantity = y[ppm->index_in_a]+dy[ppm->index_in_a]*dtau;
    break;
  }

  /* loop over time steps, checking that there will be no overshooting */
  while (sign_dtau*(quantity - stop) < 0.) {

    /* check that V(phi) or H(phi) do not take forbidden values (negative or positive derivative) */
    if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
      class_call(primordial_inflation_check_potential(ppm, y[ppm->index_in_phi], &V, &dV, &ddV),
                 ppm->error_message,
                 ppm->error_message);
    }
    else {
      class_call(primordial_inflation_check_hubble(ppm, y[ppm->index_in_phi], &H, &dH, &ddH, &dddH),
                 ppm->error_message,
                 ppm->error_message);
    }

    /* take one time step */
    tau_start = tau_end;
    tau_end = tau_start + dtau;

    // mind the fabs(...) below (works for both forward and backward integration)
    class_test(fabs(dtau/tau_start) < ppr->smallest_allowed_variation,
               ppm->error_message,
               "integration step: relative change in time =%e < machine precision : leads either to numerical error or infinite loop",dtau/tau_start);

    class_call(generic_integrator(primordial_inflation_derivs,
                                  tau_start,
                                  tau_end,
                                  y,
                                  &pipaw,
                                  ppr->primordial_inflation_tol_integration,
                                  ppr->smallest_allowed_variation,
                                  &gi),
               gi.error_message,
               ppm->error_message);

    /* eventually, check that epsilon is not becoming greater than one */
    if (check_epsilon == _TRUE_) {
      epsilon_old = epsilon;
      class_call_except(primordial_inflation_get_epsilon(ppm,
                                                         y[ppm->index_in_phi],
                                                         &epsilon),
                        ppm->error_message,
                        ppm->error_message,
                        cleanup_generic_integrator(&gi));
      class_test_except((epsilon > 1) && (epsilon_old <= 1),
                        ppm->error_message,
                        cleanup_generic_integrator(&gi),
                        "Inflaton evolution crosses the border from epsilon<1 to epsilon>1 at phi=%g. Inflation disrupted during the observable e-folds",
                        y[ppm->index_in_phi]);
    }

    /* recompute new value of next conformal time step */
    class_call(primordial_inflation_derivs(tau_end,
                                           y,
                                           dy,
                                           &pipaw,
                                           ppm->error_message),
               ppm->error_message,
               ppm->error_message);

    // compute timestep (if time = conformal, dtau is the conformal time step,
    // if time = proper, dtau is in fact dt, the proper time step)
    if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
      dtau = ppr->primordial_inflation_bg_stepsize
        *MIN(y[ppm->index_in_a]/dy[ppm->index_in_a],fabs(y[ppm->index_in_dphi]/dy[ppm->index_in_dphi]));
    }
    else {
      // minus sign for backward in time
      dtau = sign_dtau * ppr->primordial_inflation_bg_stepsize*y[ppm->index_in_a]/dy[ppm->index_in_a];
    }

    /* expected value of target quantity after the next step
       (same formulas as before the loop, see comments there) */
    switch (target) {

    case _aH_:
      // next (approximate) value of aH after next step
      // (a+[da/dx]*dx) H = aH (1 + [da/dx] / a dx)
      // where dtau can be conformal or proper time
      quantity = dy[ppm->index_in_a] * (1.+ dy[ppm->index_in_a]/y[ppm->index_in_a] * dtau);
      if (time == conformal) quantity /= y[ppm->index_in_a];
      break;

    case _phi_:
      // next (approximate) value of phi after next step
      quantity = y[ppm->index_in_phi]+dy[ppm->index_in_phi]*dtau;
      break;

    case _end_inflation_:
      // current value of quantity = - d2a/dt2 /a = [- (a'/a)^2 + 3/2 8pi/3 phi'^2]/a^2
      quantity = -pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2) + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi];
      if (time == conformal) quantity /= pow(y[ppm->index_in_a],2);
      break;

    case _a_:
      // next (approximate) value of a after next step
      quantity = y[ppm->index_in_a]+dy[ppm->index_in_a]*dtau;
      break;
    }
  }

  /* won't use the integrator anymore */
  class_call(cleanup_generic_integrator(&gi),
             gi.error_message,
             ppm->error_message);

  /* Perform one last step with a simple trapezoidal integral. This
     will bring exactly phi or a forward to phi_stop or a_stop, or
     approximately aH forward to aH_stop, or approximately
     [-d2a/dt2 /a] backward to zero. */
  switch (target) {

  case _aH_:
    switch (time){
    case proper:
      // NOTE(review): compare with the conformal branch below — a factor
      // y[index_in_a] may be expected in the denominator here; confirm
      // against upstream before changing anything.
      dtau = (stop/dy[ppm->index_in_a]-1.)/dy[ppm->index_in_a];
      break;
    case conformal:
      dtau = (stop/(dy[ppm->index_in_a]/y[ppm->index_in_a])-1.)/(dy[ppm->index_in_a]/y[ppm->index_in_a]);
      break;
    }
    break;

  case _phi_:
    dtau = (stop-y[ppm->index_in_phi])/dy[ppm->index_in_phi];
    break;

  case _end_inflation_:
    class_call(primordial_inflation_check_potential(ppm,y[ppm->index_in_phi],&V,&dV,&ddV),
               ppm->error_message,
               ppm->error_message);
    // We can easily pull back quantity=-d2a/dt2 /a by noticing that
    // d(quantity)/dtau = 8piG phi' phi'' / a^2 (exact relation!)
    // or
    // d(quantity)/dtau = 8piG phi^dot (a phi^dot)^dot = 8piG phi^dot (a^dot phi^dot+ a phi^dotdot)
    // By taking the step dtau = - quantity / [d(quantity)/dtau] we nearly reach quantity=0 (end of inflation), up to very good approximation
    switch (time){
    case proper:
      dtau = -quantity/(8.*_PI_*dy[ppm->index_in_phi]*(dy[ppm->index_in_a]*dy[ppm->index_in_phi]+y[ppm->index_in_a]*dy[ppm->index_in_dphi]));
      break;
    case conformal:
      dtau = -quantity/(8.*_PI_/y[ppm->index_in_a]/y[ppm->index_in_a]*dy[ppm->index_in_phi]*dy[ppm->index_in_dphi]);
      break;
    }
    break;

  case _a_:
    dtau = (stop-y[ppm->index_in_a])/dy[ppm->index_in_a];
    break;
  }

  /* apply the final trapezoidal step to the background variables */
  y[ppm->index_in_a] += dy[ppm->index_in_a]*dtau;
  y[ppm->index_in_phi] += dy[ppm->index_in_phi]*dtau;
  if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V)||(ppm->primordial_spec_type == inflation_V_end)))
    y[ppm->index_in_dphi] += dy[ppm->index_in_dphi]*dtau;

  // this last step updates also the dy[]
  class_call(primordial_inflation_derivs(tau_end,
                                         y,
                                         dy,
                                         &pipaw,
                                         ppm->error_message),
             ppm->error_message,
             ppm->error_message);

  // uncomment if you want to test that the routine really reached the point at which d2a/dt2=0
  /*
  if (target == _end_inflation_) {
    class_call(primordial_inflation_derivs(tau_end, y, dy, &pipaw, ppm->error_message),
               ppm->error_message,
               ppm->error_message);
    aH = dy[ppm->index_in_a]/y[ppm->index_in_a];
    quantity = (-aH*aH + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi])/y[ppm->index_in_a]/y[ppm->index_in_a];
    if (ppm->primordial_verbose>1)
      printf(" (-d2a/dt2 /a = %e)\n",quantity);
  }
  */

  return _SUCCESS_;

}

/**
 * Routine checking positivity and negative slope of potential. The
 * negative slope is an arbitrary choice. Currently the code can only
 * deal with monotonic variations of the inflaton during inflation. So
 * the slope had to be always negative or always positive... we took
 * the first option.
 *
 * @param ppm Input: pointer to primordial structure
 * @param phi Input: field value where to perform the check
 * @param V   Output: inflaton potential in units of \f$ Mp^4\f$
 * @param dV  Output: first derivative of inflaton potential wrt the field
 * @param ddV Output: second derivative of inflaton potential wrt the field
 * @return the error status
 */

int primordial_inflation_check_potential(
                                         struct primordial * ppm,
                                         double phi,
                                         double * V,
                                         double * dV,
                                         double * ddV
                                         ) {

  /* evaluate V, dV, ddV at phi */
  class_call(primordial_inflation_potential(ppm,phi,V,dV,ddV),
             ppm->error_message,
             ppm->error_message);

  /* the potential must remain strictly positive during observable inflation */
  class_test(*V <= 0.,
             ppm->error_message,
             "This potential becomes negative at phi=%g, before the end of observable inflation. It cannot be treated by this code",
             phi);

  /* monotonicity convention: phi increases while V decreases (dV/dphi < 0) */
  class_test(*dV >= 0.,
             ppm->error_message,
             "All the code is written for the case dV/dphi<0. Here, in phi=%g, we have dV/dphi=%g. This potential cannot be treated by this code",
             phi,*dV);

  return _SUCCESS_;

}

/**
 * Routine checking positivity and negative slope of \f$ H(\phi)\f$. The
 * negative slope is an arbitrary choice. Currently the code can only
 * deal with monotonic variations of the inflaton during
 * inflation. And H can only decrease with time. So the slope \f$ dH/d\phi\f$
 * has to be always negative or always positive... we took the first
 * option: phi increases, H decreases.
 *
 * @param ppm  Input: pointer to primordial structure
 * @param phi  Input: field value where to perform the check
 * @param H    Output: Hubble parameters in units of Mp
 * @param dH   Output: \f$ dH / d\phi \f$
 * @param ddH  Output: \f$ d^2H / d\phi^2 \f$
 * @param dddH Output: \f$ d^3H / d\phi^3 \f$
 * @return the error status
 */

int primordial_inflation_check_hubble(
                                      struct primordial * ppm,
                                      double phi,
                                      double * H,
                                      double * dH,
                                      double * ddH,
                                      double * dddH
                                      ) {

  /* evaluate H and its first three derivatives at phi */
  class_call(primordial_inflation_hubble(ppm,
                                         phi,
                                         H,dH,ddH,dddH),
             ppm->error_message,
             ppm->error_message);

  /* H must be non-negative */
  class_test(*H < 0.,
             ppm->error_message,
             "this H(phi) is not physical. H = %e",
             *H);

  /* monotonicity convention: phi increases while H decreases (dH/dphi < 0) */
  class_test(*dH > 0.,
             ppm->error_message,
             "this H(phi) is not decreasing with growing phi. dH/dphi = %e",
             *dH);

  return _SUCCESS_;

}

/**
 * Routine computing the first slow-roll parameter epsilon
 *
 * @param ppm     Input: pointer to primordial structure
 * @param phi     Input: field value where to compute epsilon
 * @param epsilon Output: result
 * @return the error status
 */

int primordial_inflation_get_epsilon(
                                     struct primordial * ppm,
                                     double phi,
                                     double * epsilon
                                     ) {

  double V,dV,ddV;
  double H,dH,ddH,dddH;

  switch (ppm->primordial_spec_type) {

  case inflation_V:
  case inflation_V_end:
    class_call(primordial_inflation_potential(ppm,
                                              phi,
                                              &V,&dV,&ddV),
               ppm->error_message,
               ppm->error_message);

    /* potential formulation: epsilon = (1/16pi) (V'/V)^2 (units Mp=1) */
    *epsilon = 1./16./_PI_*pow(dV/V,2);

    //*eta = 1./8./pi*(ddV/V)
    break;

  case inflation_H:
    class_call(primordial_inflation_hubble(ppm,
                                           phi,
                                           &H,&dH,&ddH,&dddH),
               ppm->error_message,
               ppm->error_message);

    /* Hubble formulation: epsilon = (1/4pi) (H'/H)^2 */
    *epsilon = 1./4./_PI_*pow(dH/H,2);
    break;

  default:
    class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
    break;
  }

  return _SUCCESS_;

}

/**
 * Routine searching phi_pivot when a given amount of inflation is requested.
 *
 * @param ppm Input/output: pointer to primordial structure
 * @param ppr Input: pointer to precision structure
 * @param y   Input: running vector of background variables, already allocated and initialized
 * @param dy  Input: running vector of background derivatives, already allocated
 * @return the error status
 */

int primordial_inflation_find_phi_pivot(
                                        struct primordial * ppm,
                                        struct precision * ppr,
                                        double * y,
                                        double * dy
                                        ) {

  /** Summary: */

  /** - define local variables */
  double epsilon,dphi;
  double phi_try,H_try,dphidt_try,ratio_try=0.;
  double phi_left,phi_right,phi_mid;
  double phi_small_epsilon,phi_stop;
  double dphidt_small_epsilon;
  double H_small_epsilon;
  double aH_ratio_after_small_epsilon=0.;
  double a_ratio_after_small_epsilon=0.;
  double target=0.;
  double a_pivot,aH_pivot;
  double rho_end;
  double h;
  double H0;
  double rho_c0;
  double sigma_B;
  double Omega_g0;
  double Omega_r0;

  /** - check whether in vicinity of phi_end, inflation is still ongoing */
  class_call(primordial_inflation_get_epsilon(ppm,ppm->phi_end-ppr->primordial_inflation_end_dphi,&epsilon),
             ppm->error_message,
             ppm->error_message);

  /** - case in which epsilon>1: hence we must find the value phi_stop <
      phi_end where inflation ends up naturally */
  if (epsilon > 1.) {
    // assume that inflation ends up naturally

    /** - --> find latest value of the field such that epsilon = primordial_inflation_small_epsilon (default: 0.1) */

    /** - --> bracketing right-hand value is phi_end (but the potential will not be evaluated exactly there, only closeby */
    phi_right = ppm->phi_end;

    /** - --> bracketing left-hand value is found by iterating with logarithmic step until epsilon < primordial_inflation_small_epsilon */
    dphi = ppr->primordial_inflation_end_dphi;
    do {
      dphi *= ppr->primordial_inflation_end_logstep;
      class_call(primordial_inflation_get_epsilon(ppm,ppm->phi_end-dphi,&epsilon),
                 ppm->error_message,
                 ppm->error_message);
    } while (epsilon > ppr->primordial_inflation_small_epsilon);
    phi_left = ppm->phi_end-dphi;

    /** - --> find value such that epsilon = primordial_inflation_small_epsilon by bisection */
    do {
      phi_mid = 0.5*(phi_left+phi_right);
      class_call(primordial_inflation_get_epsilon(ppm,phi_mid,&epsilon),
                 ppm->error_message,
                 ppm->error_message);
      if (epsilon < ppr->primordial_inflation_small_epsilon)
        phi_left=phi_mid;
      else
        phi_right=phi_mid;
    } while (fabs(epsilon-ppr->primordial_inflation_small_epsilon) > ppr->primordial_inflation_small_epsilon_tol);

    /** - --> value found and stored as phi_small_epsilon */
    phi_small_epsilon = phi_mid;

    /** - --> find inflationary attractor in phi_small_epsilon (should exist since epsilon<<1 there) */
    class_call(primordial_inflation_find_attractor(ppm,
                                                   ppr,
                                                   phi_small_epsilon,
                                                   ppr->primordial_inflation_attractor_precision_initial,
                                                   y,
                                                   dy,
                                                   &H_small_epsilon,
                                                   &dphidt_small_epsilon),
               ppm->error_message,
               ppm->error_message);

    /** - --> compute amount of inflation between this phi_small_epsilon and the end of inflation */
    y[ppm->index_in_a]=1.;
    y[ppm->index_in_phi]= phi_small_epsilon;
    y[ppm->index_in_dphi]=y[ppm->index_in_a]*dphidt_small_epsilon;
    class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _end_inflation_, 0., _FALSE_, forward, conformal),
               ppm->error_message,
               ppm->error_message);

    // we have used here conformal time, so aH = dy[a]/y[a]
    aH_ratio_after_small_epsilon = dy[ppm->index_in_a]/y[ppm->index_in_a]/H_small_epsilon;
    a_ratio_after_small_epsilon = y[ppm->index_in_a];

    /* translate the user's pivot definition into a target number of e-folds */
    switch (ppm->phi_pivot_method) {
    case ln_aH_ratio_auto:
      /* get the target value of ln_aH_ratio */
      rho_end = 2./8./_PI_*pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2);
      rho_end = 8*_PI_/3.*rho_end/(_G_*_h_P_/pow(_c_,3))*pow(_Mpc_over_m_,2);
      h = 0.7;  // fiducial reduced Hubble parameter used only for this rough auto-estimate
      H0 = h * 1.e5 / _c_;
      rho_c0 = pow(H0,2);
      sigma_B = 2. * pow(_PI_,5) * pow(_k_B_,4) / 15. / pow(_h_P_,3) / pow(_c_,2);
      Omega_g0 = (4.*sigma_B/_c_*pow(2.726,4.)) / (3.*_c_*_c_*1.e10*h*h/_Mpc_over_m_/_Mpc_over_m_/8./_PI_/_G_);
      Omega_r0 = 3.046*7./8.*pow(4./11.,4./3.)*Omega_g0;
      target = log(H0/0.05*pow(Omega_r0,0.5)*pow(2./100.,1./12.)*pow(rho_end/rho_c0,0.25));
      //fprintf(stderr,"auto: log(aH_end/aH_*)=%e\n",target);
      break;
    case ln_aH_ratio:
      target = ppm->phi_pivot_target;
      //fprintf(stderr,"fixed: log(aH_end/aH_*)=%e\n",target);
      break;
    case N_star:
      target = ppm->phi_pivot_target;
      //fprintf(stderr,"fixed: log(a_end/a_*)=%e\n",target);
      break;
    }

    /** - --> by starting from phi_small_epsilon and integrating an
        approximate solution backward in time, try to estimate roughly a
        value close to phi_pivot but a bit smaller. This is done by
        trying to reach an amount of inflation equal to the requested
        one, minus the amount after phi_small_epsilon, and plus
        primordial_inflation_extra_efolds efolds (default: two). Note
        that it is not aggressive to require two extra e-folds of
        inflation before the pivot, since the calculation of the
        spectrum in the observable range will require even more. */
    y[ppm->index_in_a]=1.;
    y[ppm->index_in_phi]= phi_small_epsilon;
    switch (ppm->phi_pivot_method) {
    case ln_aH_ratio_auto:
    case ln_aH_ratio:
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _aH_, H_small_epsilon/exp(target+ppr->primordial_inflation_extra_efolds)*aH_ratio_after_small_epsilon, _TRUE_, backward, conformal),
                 ppm->error_message,
                 ppm->error_message);
      break;
    case N_star:
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _a_, 1./exp(target+ppr->primordial_inflation_extra_efolds)*a_ratio_after_small_epsilon, _TRUE_, backward, conformal),
                 ppm->error_message,
                 ppm->error_message);
      break;
    }

    /* we now have a value phi_try believed to be close to and slightly smaller than phi_pivot */
    phi_try = y[ppm->index_in_phi];

    /** - --> find attractor in phi_try */
    class_call(primordial_inflation_find_attractor(ppm,
                                                   ppr,
                                                   phi_try,
                                                   ppr->primordial_inflation_attractor_precision_initial,
                                                   y,
                                                   dy,
                                                   &H_try,
                                                   &dphidt_try),
               ppm->error_message,
               ppm->error_message);

    /** - --> check the total amount of inflation between phi_try and the end of inflation */
    y[ppm->index_in_a]=1.;
    y[ppm->index_in_phi]= phi_try;
    y[ppm->index_in_dphi]= dphidt_try;
    class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _end_inflation_, 0., _FALSE_, forward, proper),
               ppm->error_message,
               ppm->error_message);

    switch (ppm->phi_pivot_method) {
    case ln_aH_ratio_auto:
    case ln_aH_ratio:
      // aH_ratio (we have used here proper time, so aH = dy[a])
      ratio_try = dy[ppm->index_in_a]/H_try;
      break;
    case N_star:
      // a_ratio
      ratio_try = y[ppm->index_in_a];
      break;
    }

    class_test(log(ratio_try) < target,
               ppm->error_message,
               "phi_try not small enough, log(aH_stop/aH_try) or log(a_stop/a_try) (depending on what you asked) is equal to %e instead of requested %e; must write here a loop to deal automatically with this situation (by decreasing phi_try iteratively), or must increase precision parameter primordial_inflation_extra_efolds",
               log(ratio_try),
               target);

    // NOTE(review): hard-coded index 1 here and below, presumably equal to
    // ppm->index_in_phi — confirm
    phi_stop = y[1];

    if (ppm->primordial_verbose > 1)
      printf(" (inflation stops in phi_stop = %e)\n",phi_stop);

    /** - --> go back to phi_try, and now find phi_pivot such that
        the amount of inflation between phi_pivot and the end of
        inflation is exactly the one requested. */
    y[ppm->index_in_a]=1.;
    y[ppm->index_in_phi]= phi_try;
    y[ppm->index_in_dphi]= dphidt_try;
    switch (ppm->phi_pivot_method) {
    case ln_aH_ratio_auto:
    case ln_aH_ratio:
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _aH_, H_try*ratio_try/exp(target), _FALSE_, forward, proper),
                 ppm->error_message,
                 ppm->error_message);
      break;
    case N_star:
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _a_, ratio_try/exp(target), _FALSE_, forward, proper),
                 ppm->error_message,
                 ppm->error_message);
      break;
    }

    ppm->phi_pivot = y[1];

    if (ppm->primordial_verbose > 1) {
      printf(" (reached phi_pivot=%e)\n",ppm->phi_pivot);

      /* - --> In verbose mode, check that phi_pivot is correct. Done by
         restarting from phi_pivot and going again till the end of
         inflation.
         NOTE(review): hard-coded indices 0 (scale factor) used below —
         presumably ppm->index_in_a; confirm */
      aH_pivot = dy[0];
      a_pivot = y[0];
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _end_inflation_, 0., _FALSE_, forward, proper),
                 ppm->error_message,
                 ppm->error_message);
      printf(" (from phi_pivot till the end, ln(aH_2/aH_1) = %e, ln(a_2/a_1) = %e)\n",log(dy[0]/aH_pivot),log(y[0]/a_pivot));
    }
  }
  /** - case in which epsilon<1: */
  else {

    /** - --> find inflationary attractor in phi_small_epsilon (should exist since epsilon<1 there) */
    class_call(primordial_inflation_find_attractor(ppm,
                                                   ppr,
                                                   ppm->phi_end,
                                                   ppr->primordial_inflation_attractor_precision_initial,
                                                   y,
                                                   dy,
                                                   &H_small_epsilon,
                                                   &dphidt_small_epsilon),
               ppm->error_message,
               ppm->error_message);

    /** - --> by starting from phi_end and integrating an approximate
        solution backward in time, try to estimate roughly a value close
        to phi_pivot but a bit smaller. This is done by trying to reach
        an amount of inflation equal to the requested one, minus the
        amount after phi_small_epsilon, and plus
        primordial_inflation_extra_efolds efolds (default: two). Note
        that it is not aggressive to require two extra e-folds of
        inflation before the pivot, since the calculation of the
        spectrum in the observable range will require even more. */
    y[ppm->index_in_a]=1.;
    y[ppm->index_in_phi]= ppm->phi_end;

    // NOTE(review): in this branch, target, aH_ratio_after_small_epsilon and
    // a_ratio_after_small_epsilon still hold their initial values (0.) from
    // the declarations above; they are only assigned in the epsilon>1 branch.
    // Confirm against upstream whether a target computation is missing here.
    switch (ppm->phi_pivot_method) {
    case ln_aH_ratio_auto:
    case ln_aH_ratio:
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _aH_, H_small_epsilon/exp(target+ppr->primordial_inflation_extra_efolds)*aH_ratio_after_small_epsilon, _TRUE_, backward, conformal),
                 ppm->error_message,
                 ppm->error_message);
      break;
    case N_star:
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _a_, 1./exp(target+ppr->primordial_inflation_extra_efolds)*a_ratio_after_small_epsilon, _TRUE_, backward, conformal),
                 ppm->error_message,
                 ppm->error_message);
      break;
    }

    /** - --> we now have a value phi_try believed to be close to and slightly smaller than phi_pivot */
    phi_try = y[ppm->index_in_phi];

    /** - --> find attractor in phi_try */
    class_call(primordial_inflation_find_attractor(ppm,
                                                   ppr,
                                                   phi_try,
                                                   ppr->primordial_inflation_attractor_precision_initial,
                                                   y,
                                                   dy,
                                                   &H_try,
                                                   &dphidt_try),
               ppm->error_message,
               ppm->error_message);

    /** - --> check the total amount of inflation between phi_try and the end of inflation */
    y[ppm->index_in_a]=1.;
    y[ppm->index_in_phi]= phi_try;
    y[ppm->index_in_dphi]= dphidt_try;
    class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _phi_, ppm->phi_end, _FALSE_, forward, proper),
               ppm->error_message,
               ppm->error_message);

    switch (ppm->phi_pivot_method) {
    case ln_aH_ratio_auto:
    case ln_aH_ratio:
      // aH_ratio (we have used here proper time, so aH = dy[a])
      ratio_try = dy[ppm->index_in_a]/H_try;
      break;
    case N_star:
      // a_ratio
      ratio_try = y[ppm->index_in_a];
      break;
    }

    class_test(log(ratio_try) < target,
               ppm->error_message,
               "phi_try not small enough, log(aH_stop/aH_try) or log(a_stop/a_try) (depending on what you asked) is equal to %e instead of requested %e; must write here a loop to deal automatically with this situation (by decreasing phi_try iteratively), or must increase precision parameter primordial_inflation_extra_efolds",
               log(ratio_try),
               target);

    // NOTE(review): hard-coded index 1 here and below, presumably equal to
    // ppm->index_in_phi — confirm
    phi_stop = y[1];

    if (ppm->primordial_verbose > 1)
      printf(" (inflation stops in phi_stop = %e)\n",phi_stop);

    /** - --> go back to phi_try, and now find phi_pivot such that
        the amount of inflation between phi_pivot and the end of
        inflation is exactly the one requested. */
    y[ppm->index_in_a]=1.;
    y[ppm->index_in_phi]= phi_try;
    y[ppm->index_in_dphi]= dphidt_try;
    switch (ppm->phi_pivot_method) {
    case ln_aH_ratio_auto:
    case ln_aH_ratio:
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _aH_, H_try*ratio_try/exp(target), _FALSE_, forward, proper),
                 ppm->error_message,
                 ppm->error_message);
      break;
    case N_star:
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _a_, ratio_try/exp(target), _FALSE_, forward, proper),
                 ppm->error_message,
                 ppm->error_message);
      break;
    }

    ppm->phi_pivot = y[1];

    if (ppm->primordial_verbose > 1) {
      printf(" (reached phi_pivot=%e)\n",ppm->phi_pivot);

      /** - --> In verbose mode, check that phi_pivot is correct. Done by
          restarting from phi_pivot and going again till the end of
          inflation. */
      aH_pivot = dy[0];
      a_pivot = y[0];
      class_call(primordial_inflation_evolve_background(ppm, ppr, y, dy, _phi_, ppm->phi_end, _FALSE_, forward, proper),
                 ppm->error_message,
                 ppm->error_message);
      printf(" (from phi_pivot till the end, ln(aH_2/aH_1) = %e, ln(a_2/a_1) = %e)\n",log(dy[0]/aH_pivot),log(y[0]/a_pivot));
    }
  }

  return _SUCCESS_;

}

/**
 * Routine returning derivative of system of background/perturbation
 * variables.
 * Like other routines used by the generic integrator
 * (background_derivs, thermodynamics_derivs, perturb_derivs), this
 * routine has a generic list of arguments, and a slightly different
 * error management, with the error message returned directly in an
 * ErrMsg field.
 *
 * @param tau                      Input: time (not used explicitly inside the routine, but requested by the generic integrator)
 * @param y                        Input/output: running vector of background variables, already allocated and initialized
 * @param dy                       Input: running vector of background derivatives, already allocated
 * @param parameters_and_workspace Input: all necessary input variables apart from y
 * @param error_message            Output: error message
 * @return the error status
 */

int primordial_inflation_derivs(
                                double tau,
                                double * y,
                                double * dy,
                                void * parameters_and_workspace,
                                ErrorMsg error_message
                                ) {

  struct primordial_inflation_parameters_and_workspace * ppipaw;
  struct primordial * ppm;

  ppipaw = parameters_and_workspace;
  ppm = ppipaw->ppm;

  // a2 = a^2, cached for repeated use below
  ppipaw->a2=y[ppm->index_in_a]*y[ppm->index_in_a];

  // BACKGROUND
  switch (ppm->primordial_spec_type) {

  case inflation_V:
  case inflation_V_end:
    class_call(primordial_inflation_potential(ppm,
                                              y[ppm->index_in_phi],
                                              &(ppipaw->V),
                                              &(ppipaw->dV),
                                              &(ppipaw->ddV)),
               ppm->error_message,
               ppm->error_message);

    switch (ppipaw->integrate) {

    case forward:
      switch (ppipaw->time) {
      case conformal:
        // a H = a'/a
        ppipaw->aH = sqrt((8*_PI_/3.)*(0.5*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]+ppipaw->a2*ppipaw->V));
        // 1: a
        dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->aH;
        // 2: phi
        dy[ppm->index_in_phi]=y[ppm->index_in_dphi];
        // 3: dphi/dtau (Klein-Gordon in conformal time)
        dy[ppm->index_in_dphi]=-2.*ppipaw->aH*y[ppm->index_in_dphi]-ppipaw->a2*ppipaw->dV;
        break;
      case proper:
        // a H = adot
        ppipaw->aH = y[ppm->index_in_a]*sqrt((8*_PI_/3.)*(0.5*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]+ppipaw->V));
        // 1: a
        dy[ppm->index_in_a]=ppipaw->aH;
        // 2: phi
        dy[ppm->index_in_phi]=y[ppm->index_in_dphi];
        // 3: dphi/dt (Klein-Gordon in proper time)
        dy[ppm->index_in_dphi]=-3.*ppipaw->aH/y[ppm->index_in_a]*y[ppm->index_in_dphi]-ppipaw->dV;
        break;
      }

      // z''/z (assumes that conformal time is requested)
      ppipaw->zpp_over_z= 2*ppipaw->aH*ppipaw->aH - ppipaw->a2*ppipaw->ddV
        - 4.*_PI_*(7.*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]
                   +4.*y[ppm->index_in_dphi]/ppipaw->aH*ppipaw->a2*ppipaw->dV)
        +32.*_PI_*_PI_*pow(y[ppm->index_in_dphi],4)/pow(ppipaw->aH,2);

      // a''/a (assumes that conformal time is requested)
      ppipaw->app_over_a=2.*ppipaw->aH*ppipaw->aH - 4.*_PI_*y[ppm->index_in_dphi]*y[ppm->index_in_dphi];
      break;

    // For backward integration of approximate slow-roll solution:
    // Neglect kinetic energy of the field phi'^2/(2a^2) w.r.t. potential energy V
    // Neglect phi'' w.r.t 2aHphi', reducing 2nd order Klein-Gordon to approximate 1st-order
    case backward:
      switch (ppipaw->time) {
      case conformal:
        // a H = a'/a
        ppipaw->aH = sqrt((8*_PI_/3.)*ppipaw->a2*ppipaw->V);
        // 1: a
        dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->aH;
        // 2: phi (slow-roll: phi' = -a^2 V'/(3 aH))
        dy[ppm->index_in_phi]= -ppipaw->a2*ppipaw->dV/3./ppipaw->aH;
        break;
      case proper:
        // a H = da/dt
        ppipaw->aH = y[ppm->index_in_a]*sqrt((8*_PI_/3.)*ppipaw->V);
        // 1: a
        dy[ppm->index_in_a]=ppipaw->aH;
        // 2: phi (slow-roll: phi^dot = -V'/(3H))
        dy[ppm->index_in_phi]= -ppipaw->dV/3./ppipaw->aH*y[ppm->index_in_a];
        break;
      }
      break;
    }
    break;

  case inflation_H:
    class_call(primordial_inflation_hubble(ppm,
                                           y[ppm->index_in_phi],
                                           &(ppipaw->H),
                                           &(ppipaw->dH),
                                           &(ppipaw->ddH),
                                           &(ppipaw->dddH)),
               ppm->error_message,
               ppm->error_message);

    switch (ppipaw->time) {
    case conformal:
      // 1: a
      dy[ppm->index_in_a]=ppipaw->a2*ppipaw->H;
      // 2: phi
      dy[ppm->index_in_phi]=-1./4./_PI_*y[ppm->index_in_a]*ppipaw->dH;
      break;
    case proper:
      // 1: a
      dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->H;
      // 2: phi
      dy[ppm->index_in_phi]=-1./4./_PI_*ppipaw->dH;
      break;
    }

    // z''/z (assumes that conformal time is requested)
    ppipaw->zpp_over_z = 2. *ppipaw->a2*ppipaw->H*ppipaw->H
      -3./4./_PI_ *ppipaw->a2*ppipaw->H*ppipaw->ddH
      +1./16./_PI_/_PI_*ppipaw->a2*ppipaw->ddH*ppipaw->ddH
      +1./16./_PI_/_PI_*ppipaw->a2*ppipaw->dH*ppipaw->dddH
      -1./4./_PI_/_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH*ppipaw->ddH/ppipaw->H
      +1./2./_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH
      +1./8./_PI_/_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH*ppipaw->dH*ppipaw->dH/ppipaw->H/ppipaw->H;

    // a''/a (assumes that conformal time is requested)
    ppipaw->app_over_a = 2.*ppipaw->a2*ppipaw->H*ppipaw->H
      -4.*_PI_*dy[ppm->index_in_phi]*dy[ppm->index_in_phi];
    break;

  default:
    class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
    break;
  }

  if (ppipaw->N <= ppm->in_bg_size)
    // mind the <= instead of ==, necessary because for backward integration 1 equation is removed
    return _SUCCESS_;

  // PERTURBATIONS
  class_test(ppipaw->time == proper,
             ppm->error_message,
             "For inflaton perturbations, only conformal time is coded.");

  // SCALARS (Mukhanov-Sasaki-type system on ksi with effective mass z''/z)
  // 4: ksi_re
  dy[ppm->index_in_ksi_re]=y[ppm->index_in_dksi_re];
  // 5: ksi_im
  dy[ppm->index_in_ksi_im]=y[ppm->index_in_dksi_im];
  // 6: d ksi_re / dtau
  dy[ppm->index_in_dksi_re]=-(ppipaw->k*ppipaw->k-ppipaw->zpp_over_z)*y[ppm->index_in_ksi_re];
  // 7: d ksi_im / dtau
  dy[ppm->index_in_dksi_im]=-(ppipaw->k*ppipaw->k-ppipaw->zpp_over_z)*y[ppm->index_in_ksi_im];

  // TENSORS (same structure with effective mass a''/a)
  // 8: ah_re
  dy[ppm->index_in_ah_re]=y[ppm->index_in_dah_re];
  // 9: ah_im
  dy[ppm->index_in_ah_im]=y[ppm->index_in_dah_im];
  // 10: d ah_re / dtau
  dy[ppm->index_in_dah_re]=-(ppipaw->k*ppipaw->k-ppipaw->app_over_a)*y[ppm->index_in_ah_re];
  // 11: d ah_im / dtau
  dy[ppm->index_in_dah_im]=-(ppipaw->k*ppipaw->k-ppipaw->app_over_a)*y[ppm->index_in_ah_im];

  return _SUCCESS_;

}

/**
 * This routine reads the primordial spectrum from an external command,
 * and stores the tabulated values.
 * The sampling of the k's given by the external command is preserved.
* * Author: Jesus Torrado (torradocacho@lorentz.leidenuniv.nl) * Date: 2013-12-20 * * @param ppt Input/output: pointer to perturbation structure * @param ppm Input/output: pointer to primordial structure * @return the error status */ int primordial_external_spectrum_init( struct perturbs * ppt, struct primordial * ppm ) { /** Summary: */ char arguments[_ARGUMENT_LENGTH_MAX_]; char line[_LINE_LENGTH_MAX_]; char command_with_arguments[2*_ARGUMENT_LENGTH_MAX_]; FILE *process; int n_data_guess, n_data = 0; double *k = NULL, *pks = NULL, *pkt = NULL, *tmp = NULL; double this_k, this_pks, this_pkt; int status; int index_k; /** - Initialization */ /* Prepare the data (with some initial size) */ n_data_guess = 100; k = (double *)malloc(n_data_guess*sizeof(double)); pks = (double *)malloc(n_data_guess*sizeof(double)); if (ppt->has_tensors == _TRUE_) pkt = (double *)malloc(n_data_guess*sizeof(double)); /* Prepare the command */ /* If the command is just a "cat", no arguments need to be passed */ if(strncmp("cat ", ppm->command, 4) == 0) { sprintf(arguments, " "); } /* otherwise pass the list of arguments */ else { sprintf(arguments, " %g %g %g %g %g %g %g %g %g %g", ppm->custom1, ppm->custom2, ppm->custom3, ppm->custom4, ppm->custom5, ppm->custom6, ppm->custom7, ppm->custom8, ppm->custom9, ppm->custom10); } /* write the actual command in a string */ sprintf(command_with_arguments, "%s %s", ppm->command, arguments); if (ppm->primordial_verbose > 0) printf(" -> running: %s\n",command_with_arguments); /** - Launch the command and retrieve the output */ /* Launch the process */ process = popen(command_with_arguments, "r"); class_test(process == NULL, ppm->error_message, "The program failed to set the environment for the external command. 
Maybe you ran out of memory."); /* Read output and store it */ while (fgets(line, sizeof(line)-1, process) != NULL) { if (ppt->has_tensors == _TRUE_) { sscanf(line, "%lf %lf %lf", &this_k, &this_pks, &this_pkt); } else { sscanf(line, "%lf %lf", &this_k, &this_pks); } /* Standard technique in C: if too many data, double the size of the vectors */ /* (it is faster and safer that reallocating every new line) */ if((n_data+1) > n_data_guess) { n_data_guess *= 2; tmp = (double *)realloc(k, n_data_guess*sizeof(double)); class_test(tmp == NULL, ppm->error_message, "Error allocating memory to read the external spectrum.\n"); k = tmp; tmp = (double *)realloc(pks, n_data_guess*sizeof(double)); class_test(tmp == NULL, ppm->error_message, "Error allocating memory to read the external spectrum.\n"); pks = tmp; if (ppt->has_tensors == _TRUE_) { tmp = (double *)realloc(pkt, n_data_guess*sizeof(double)); class_test(tmp == NULL, ppm->error_message, "Error allocating memory to read the external spectrum.\n"); pkt = tmp; }; }; /* Store */ k [n_data] = this_k; pks[n_data] = this_pks; if (ppt->has_tensors == _TRUE_) { pkt[n_data] = this_pkt; } n_data++; /* Check ascending order of the k's */ if(n_data>1) { class_test(k[n_data-1] <= k[n_data-2], ppm->error_message, "The k's are not strictly sorted in ascending order, " "as it is required for the calculation of the splines.\n"); } } /* Close the process */ status = pclose(process); class_test(status != 0., ppm->error_message, "The attempt to launch the external command was unsuccessful. " "Try doing it by hand to check for errors."); /* Test limits of the k's */ class_test(k[1] > ppt->k_min, ppm->error_message, "Your table for the primordial spectrum does not have " "at least 2 points before the minimum value of k: %e . 
" "The splines interpolation would not be safe.",ppt->k_min); class_test(k[n_data-2] < ppt->k_max, ppm->error_message, "Your table for the primordial spectrum does not have " "at least 2 points after the maximum value of k: %e . " "The splines interpolation would not be safe.",ppt->k_max); /** - Store the read results into CLASS structures */ ppm->lnk_size = n_data; /** - Make room */ class_realloc(ppm->lnk, ppm->lnk, ppm->lnk_size*sizeof(double), ppm->error_message); class_realloc(ppm->lnpk[ppt->index_md_scalars], ppm->lnpk[ppt->index_md_scalars], ppm->lnk_size*sizeof(double), ppm->error_message); class_realloc(ppm->ddlnpk[ppt->index_md_scalars], ppm->ddlnpk[ppt->index_md_scalars], ppm->lnk_size*sizeof(double), ppm->error_message); if (ppt->has_tensors == _TRUE_) { class_realloc(ppm->lnpk[ppt->index_md_tensors], ppm->lnpk[ppt->index_md_tensors], ppm->lnk_size*sizeof(double), ppm->error_message); class_realloc(ppm->ddlnpk[ppt->index_md_tensors], ppm->ddlnpk[ppt->index_md_tensors], ppm->lnk_size*sizeof(double), ppm->error_message); }; /** - Store values */ for (index_k=0; index_k<ppm->lnk_size; index_k++) { ppm->lnk[index_k] = log(k[index_k]); ppm->lnpk[ppt->index_md_scalars][index_k] = log(pks[index_k]); if (ppt->has_tensors == _TRUE_) ppm->lnpk[ppt->index_md_tensors][index_k] = log(pkt[index_k]); /* DEBUG (with tensors) fprintf(stderr,"Storing[%d(+1) of %d]: \n k = %g == %g\n pks = %g == %g\n pkt = %g == %g\n", index_k, n_data, ppm->lnk[index_k], log(k[index_k]), ppm->lnpk[ppt->index_md_scalars][index_k], log(pks[index_k]), ppm->lnpk[ppt->index_md_tensors][index_k], log(pkt[index_k])); */ }; /** - Release the memory used locally */ free(k); free(pks); if (ppt->has_tensors == _TRUE_) free(pkt); /** - Tell CLASS that there are scalar (and tensor) modes */ ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_; if (ppt->has_tensors == _TRUE_) ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_; return _SUCCESS_; } int 
primordial_output_titles(struct perturbs * ppt, struct primordial * ppm, char titles[_MAXTITLESTRINGLENGTH_] ){ class_store_columntitle(titles,"k [1/Mpc]",_TRUE_); class_store_columntitle(titles,"P_scalar(k)",_TRUE_); class_store_columntitle(titles,"P_tensor(k)",ppt->has_tensors); return _SUCCESS_; } int primordial_output_data(struct perturbs * ppt, struct primordial * ppm, int number_of_titles, double *data){ int index_k, storeidx; double *dataptr; for (index_k=0; index_k<ppm->lnk_size; index_k++) { dataptr = data + index_k*number_of_titles; storeidx = 0; class_store_double(dataptr, exp(ppm->lnk[index_k]), _TRUE_,storeidx); class_store_double(dataptr, exp(ppm->lnpk[ppt->index_md_scalars][index_k]), _TRUE_,storeidx); class_store_double(dataptr, exp(ppm->lnpk[ppt->index_md_tensors][index_k]), ppt->has_tensors,storeidx); } return _SUCCESS_; }
/* ===================== ps.c ===================== */
/*** Some usefull math macros ***/ #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) static double mnarg1,mnarg2; #define FMAX(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) > (mnarg2) ?\ (mnarg1) : (mnarg2)) static double mnarg1,mnarg2; #define FMIN(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) < (mnarg2) ?\ (mnarg1) : (mnarg2)) #define ERFC_NPTS (int) 75 #define ERFC_PARAM_DELTA (float) 0.1 static double log_erfc_table[ERFC_NPTS], erfc_params[ERFC_NPTS]; static gsl_interp_accel *erfc_acc; static gsl_spline *erfc_spline; #define NGaussLegendre 40 //defines the number of points in the Gauss-Legendre quadrature integration #define NMass 300 #define NSFR_high 200 #define NSFR_low 250 #define NGL_SFR 100 // 100 #define NMTURN 50//100 #define LOG10_MTURN_MAX ((double)(10)) #define LOG10_MTURN_MIN ((double)(5.-9e-8)) #define NR_END 1 #define FREE_ARG char* #define MM 7 #define NSTACK 50 #define EPS2 3.0e-11 #define Luv_over_SFR (double)(1./1.15/1e-28) // Luv/SFR = 1 / 1.15 x 10^-28 [M_solar yr^-1/erg s^-1 Hz^-1] // G. Sun and S. R. 
Furlanetto (2016) MNRAS, 417, 33 #define delta_lnMhalo (double)(5e-6) #define Mhalo_min (double)(1e6) #define Mhalo_max (double)(1e16) float calibrated_NF_min; double *deltaz, *deltaz_smoothed, *NeutralFractions, *z_Q, *Q_value, *nf_vals, *z_vals; int N_NFsamples,N_extrapolated, N_analytic, N_calibrated, N_deltaz; bool initialised_ComputeLF = false; gsl_interp_accel *LF_spline_acc; gsl_spline *LF_spline; gsl_interp_accel *deriv_spline_acc; gsl_spline *deriv_spline; struct CosmoParams *cosmo_params_ps; struct UserParams *user_params_ps; struct FlagOptions *flag_options_ps; //double sigma_norm, R, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR; double sigma_norm, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR; float MinMass, mass_bin_width, inv_mass_bin_width; double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias); float *Mass_InterpTable, *Sigma_InterpTable, *dSigmadm_InterpTable; float *log10_overdense_spline_SFR, *log10_Nion_spline, *Overdense_spline_SFR, *Nion_spline; float *prev_log10_overdense_spline_SFR, *prev_log10_Nion_spline, *prev_Overdense_spline_SFR, *prev_Nion_spline; float *Mturns, *Mturns_MINI; float *log10_Nion_spline_MINI, *Nion_spline_MINI; float *prev_log10_Nion_spline_MINI, *prev_Nion_spline_MINI; float *xi_SFR,*wi_SFR, *xi_SFR_Xray, *wi_SFR_Xray; float *overdense_high_table, *overdense_low_table, *log10_overdense_low_table; float **log10_SFRD_z_low_table, **SFRD_z_high_table; float **log10_SFRD_z_low_table_MINI, **SFRD_z_high_table_MINI; double *lnMhalo_param, *Muv_param, *Mhalo_param; double *log10phi, *M_uv_z, *M_h_z; double *lnMhalo_param_MINI, *Muv_param_MINI, *Mhalo_param_MINI; double *log10phi_MINI; *M_uv_z_MINI, *M_h_z_MINI; double *deriv, *lnM_temp, *deriv_temp; double *z_val, *z_X_val, *Nion_z_val, *SFRD_val; double *Nion_z_val_MINI, *SFRD_val_MINI; void 
initialiseSigmaMInterpTable(float M_Min, float M_Max); void freeSigmaMInterpTable(); void initialiseGL_Nion(int n, float M_Min, float M_Max); void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max); float Mass_limit (float logM, float PL, float FRAC); void bisection(float *x, float xlow, float xup, int *iter); float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC); double sheth_delc(double del, double sig); float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2); double dNion_ConditionallnM(double lnM, void *params); double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES); double dNion_ConditionallnM_MINI(double lnM, void *params); double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES); float GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES); float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES); //JBM: Exact integral for power-law indices non zero (for zero it's erfc) double Fcollapprox (double numin, double beta); int n_redshifts_1DTable; double zmin_1DTable, zmax_1DTable, zbin_width_1DTable; double *FgtrM_1DTable_linear; 
static gsl_interp_accel *Q_at_z_spline_acc; static gsl_spline *Q_at_z_spline; static gsl_interp_accel *z_at_Q_spline_acc; static gsl_spline *z_at_Q_spline; static double Zmin, Zmax, Qmin, Qmax; void Q_at_z(double z, double *splined_value); void z_at_Q(double Q, double *splined_value); static gsl_interp_accel *deltaz_spline_for_photoncons_acc; static gsl_spline *deltaz_spline_for_photoncons; static gsl_interp_accel *NFHistory_spline_acc; static gsl_spline *NFHistory_spline; static gsl_interp_accel *z_NFHistory_spline_acc; static gsl_spline *z_NFHistory_spline; void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline); void z_at_NFHist(double xHI_Hist, double *splined_value); void NFHist_at_z(double z, double *splined_value); //int nbin; //double *z_Q, *Q_value, *Q_z, *z_value; double FinalNF_Estimate, FirstNF_Estimate; struct parameters_gsl_FgtrM_int_{ double z_obs; double gf_obs; }; struct parameters_gsl_SFR_General_int_{ double z_obs; double gf_obs; double Mdrop; double Mdrop_upper; double pl_star; double pl_esc; double frac_star; double frac_esc; double LimitMass_Fstar; double LimitMass_Fesc; }; struct parameters_gsl_SFR_con_int_{ double gf_obs; double Mval; double sigma2; double delta1; double delta2; double Mdrop; double Mdrop_upper; double pl_star; double pl_esc; double frac_star; double frac_esc; double LimitMass_Fstar; double LimitMass_Fesc; }; unsigned long *lvector(long nl, long nh); void free_lvector(unsigned long *v, long nl, long nh); float *vector(long nl, long nh); void free_vector(float *v, long nl, long nh); void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]); void splint(float xa[], float ya[], float y2a[], int n, float x, float *y); void gauleg(float x1, float x2, float x[], float w[], int n); /***** FUNCTION PROTOTYPES *****/ double init_ps(); /* initialize global variables, MUST CALL THIS FIRST!!! 
returns R_CUTOFF */ void free_ps(); /* deallocates the gsl structures from init_ps */ double sigma_z0(double M); //calculates sigma at z=0 (no dicke) double power_in_k(double k); /* Returns the value of the linear power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */ double TFmdm(double k); //Eisenstein & Hu power spectrum transfer function void TFset_parameters(); double TF_CLASS(double k, int flag_int, int flag_dv); //transfer function of matter (flag_dv=0) and relative velocities (flag_dv=1) fluctuations from CLASS double power_in_vcb(double k); /* Returns the value of the DM-b relative velocity power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */ double FgtrM(double z, double M); double FgtrM_wsigma(double z, double sig); double FgtrM_st(double z, double M); double FgtrM_Watson(double growthf, double M); double FgtrM_Watson_z(double z, double growthf, double M); double FgtrM_General(double z, double M); float erfcc(float x); double splined_erfc(double x); double M_J_WDM(); void Broadcast_struct_global_PS(struct UserParams *user_params, struct CosmoParams *cosmo_params){ cosmo_params_ps = cosmo_params; user_params_ps = user_params; } /* this function reads the z=0 matter (CDM+baryons) and relative velocity transfer functions from CLASS (from a file) flag_int = 0 to initialize interpolator, flag_int = -1 to free memory, flag_int = else to interpolate. flag_dv = 0 to output density, flag_dv = 1 to output velocity. 
similar to built-in function "double T_RECFAST(float z, int flag)" */ double TF_CLASS(double k, int flag_int, int flag_dv) { static double kclass[CLASS_LENGTH], Tmclass[CLASS_LENGTH], Tvclass_vcb[CLASS_LENGTH]; static gsl_interp_accel *acc_density, *acc_vcb; static gsl_spline *spline_density, *spline_vcb; float trash, currk, currTm, currTv; double ans; int i; int gsl_status; FILE *F; char filename[500]; sprintf(filename,"%s/%s",global_params.external_table_path,CLASS_FILENAME); if (flag_int == 0) { // Initialize vectors and read file if (!(F = fopen(filename, "r"))) { LOG_ERROR("Unable to open file: %s for reading.", filename); Throw(IOError); } int nscans; for (i = 0; i < CLASS_LENGTH; i++) { nscans = fscanf(F, "%e %e %e ", &currk, &currTm, &currTv); if (nscans != 3) { LOG_ERROR("Reading CLASS Transfer Function failed."); Throw(IOError); } kclass[i] = currk; Tmclass[i] = currTm; Tvclass_vcb[i] = currTv; if (i > 0 && kclass[i] <= kclass[i - 1]) { LOG_WARNING("Tk table not ordered"); LOG_WARNING("k=%.1le kprev=%.1le", kclass[i], kclass[i - 1]); } } fclose(F); LOG_SUPER_DEBUG("Read CLASS Transfer file"); gsl_set_error_handler_off(); // Set up spline table for densities acc_density = gsl_interp_accel_alloc (); spline_density = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH); gsl_status = gsl_spline_init(spline_density, kclass, Tmclass, CLASS_LENGTH); GSL_ERROR(gsl_status); LOG_SUPER_DEBUG("Generated CLASS Density Spline."); //Set up spline table for velocities acc_vcb = gsl_interp_accel_alloc (); spline_vcb = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH); gsl_status = gsl_spline_init(spline_vcb, kclass, Tvclass_vcb, CLASS_LENGTH); GSL_ERROR(gsl_status); LOG_SUPER_DEBUG("Generated CLASS velocity Spline."); return 0; } else if (flag_int == -1) { gsl_spline_free (spline_density); gsl_interp_accel_free(acc_density); gsl_spline_free (spline_vcb); gsl_interp_accel_free(acc_vcb); return 0; } if (k > kclass[CLASS_LENGTH-1]) { // k>kmax LOG_WARNING("Called TF_CLASS 
with k=%f, larger than kmax! Returning value at kmax.", k); if(flag_dv == 0){ // output is density return (Tmclass[CLASS_LENGTH]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]); } else if(flag_dv == 1){ // output is rel velocity return (Tvclass_vcb[CLASS_LENGTH]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]); } //we just set it to the last value, since sometimes it wants large k for R<<cell_size, which does not matter much. } else { // Do spline if(flag_dv == 0){ // output is density ans = gsl_spline_eval (spline_density, k, acc_density); } else if(flag_dv == 1){ // output is relative velocity ans = gsl_spline_eval (spline_vcb, k, acc_vcb); } else{ ans=0.0; //neither densities not velocities? } } return ans/k/k; //we have to divide by k^2 to agree with the old-fashioned convention. } // FUNCTION sigma_z0(M) // Returns the standard deviation of the normalized, density excess (delta(x)) field, // smoothed on the comoving scale of M (see filter definitions for M<->R conversion). // The sigma is evaluated at z=0, with the time evolution contained in the dicke(z) factor, // i.e. sigma(M,z) = sigma_z0(m) * dicke(z) // normalized so that sigma_z0(M->8/h Mpc) = SIGMA8 in ../Parameter_files/COSMOLOGY.H // NOTE: volume is normalized to = 1, so this is equvalent to the mass standard deviation // M is in solar masses // References: Padmanabhan, pg. 210, eq. 5.107 double dsigma_dk(double k, void *params){ double p, w, T, gamma, q, aa, bb, cc, kR; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 
2000 transfer function if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); q = k / (cosmo_params_ps->hlittle*gamma); T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992) gamma = 0.25; aa = 6.4/(cosmo_params_ps->hlittle*gamma); bb = 3.0/(cosmo_params_ps->hlittle*gamma); cc = 1.7/(cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 ); } else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 8.0 / (cosmo_params_ps->hlittle*gamma); bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 1.7/(cosmo_params_ps->hlittle*gamma); bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5); cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. 
Note, flag_int = 1 here always, since now we have to have initialized the interpolator for CLASS p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms } } else{ LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } double Radius; Radius = *(double *)params; kR = k*Radius; if ( (global_params.FILTER == 0) || (sigma_norm < 0) ){ // top hat if ( (kR) < 1.0e-4 ){ w = 1.0;} // w converges to 1 as (kR) -> 0 else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));} } else if (global_params.FILTER == 1){ // gaussian of width 1/R w = pow(E, -kR*kR/2.0); } else { LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER); Throw(ValueError); } return k*k*p*w*w; } double sigma_z0(double M){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); double kstart, kend; double Radius; // R = MtoR(M); Radius = MtoR(M); // now lets do the integral for sigma and scale it with sigma_norm if(user_params_ps->POWER_SPECTRUM == 5){ kstart = fmax(1.0e-99/Radius, KBOT_CLASS); kend = fmin(350.0/Radius, KTOP_CLASS); }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max! 
else{ kstart = 1.0e-99/Radius; kend = 350.0/Radius; } lower_limit = kstart;//log(kstart); upper_limit = kend;//log(kend); F.function = &dsigma_dk; F.params = &Radius; int status; gsl_set_error_handler_off(); status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error); if(status!=0) { LOG_ERROR("gsl integration error occured!"); LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error); LOG_ERROR("data: M=%e",M); GSL_ERROR(status); } gsl_integration_workspace_free (w); return sigma_norm * sqrt(result); } // FUNCTION TFmdm is the power spectrum transfer function from Eisenstein & Hu ApJ, 1999, 511, 5 double TFmdm(double k){ double q, gamma_eff, q_eff, TF_m, q_nu; q = k*pow(theta_cmb,2)/omhh; gamma_eff=sqrt(alpha_nu) + (1.0-sqrt(alpha_nu))/(1.0+pow(0.43*k*sound_horizon, 4)); q_eff = q/gamma_eff; TF_m= log(E+1.84*beta_c*sqrt(alpha_nu)*q_eff); TF_m /= TF_m + pow(q_eff,2) * (14.4 + 325.0/(1.0+60.5*pow(q_eff,1.11))); q_nu = 3.92*q/sqrt(f_nu/N_nu); TF_m *= 1.0 + (1.2*pow(f_nu,0.64)*pow(N_nu,0.3+0.6*f_nu)) / (pow(q_nu,-1.6)+pow(q_nu,0.8)); return TF_m; } void TFset_parameters(){ double z_drag, R_drag, R_equality, p_c, p_cb, f_c, f_cb, f_nub, k_equality; LOG_DEBUG("Setting Transfer Function parameters."); z_equality = 25000*omhh*pow(theta_cmb, -4) - 1.0; k_equality = 0.0746*omhh/(theta_cmb*theta_cmb); z_drag = 0.313*pow(omhh,-0.419) * (1 + 0.607*pow(omhh, 0.674)); z_drag = 1 + z_drag*pow(cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle, 0.238*pow(omhh, 0.223)); z_drag *= 1291 * pow(omhh, 0.251) / (1 + 0.659*pow(omhh, 0.828)); y_d = (1 + z_equality) / (1.0 + z_drag); R_drag = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_drag); R_equality = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / 
(1.0 + z_equality); sound_horizon = 2.0/3.0/k_equality * sqrt(6.0/R_equality) * log( (sqrt(1+R_drag) + sqrt(R_drag+R_equality)) / (1.0 + sqrt(R_equality)) ); p_c = -(5 - sqrt(1 + 24*(1 - f_nu-f_baryon)))/4.0; p_cb = -(5 - sqrt(1 + 24*(1 - f_nu)))/4.0; f_c = 1 - f_nu - f_baryon; f_cb = 1 - f_nu; f_nub = f_nu+f_baryon; alpha_nu = (f_c/f_cb) * (2*(p_c+p_cb)+5)/(4*p_cb+5.0); alpha_nu *= 1 - 0.553*f_nub+0.126*pow(f_nub,3); alpha_nu /= 1-0.193*sqrt(f_nu)+0.169*f_nu; alpha_nu *= pow(1+y_d, p_c-p_cb); alpha_nu *= 1+ (p_cb-p_c)/2.0 * (1.0+1.0/(4.0*p_c+3.0)/(4.0*p_cb+7.0))/(1.0+y_d); beta_c = 1.0/(1.0-0.949*f_nub); } // Returns the value of the linear power spectrum DENSITY (i.e. <|delta_k|^2>/V) // at a given k mode linearly extrapolated to z=0 double power_in_k(double k){ double p, T, gamma, q, aa, bb, cc; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 2000 transfer function if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; //p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05 } else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); q = k / (cosmo_params_ps->hlittle*gamma); T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992) gamma = 0.25; aa = 6.4/(cosmo_params_ps->hlittle*gamma); bb = 3.0/(cosmo_params_ps->hlittle*gamma); cc = 1.7/(cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 
); } else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); aa = 8.0 / (cosmo_params_ps->hlittle*gamma); bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 1.7/(cosmo_params_ps->hlittle*gamma); bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5); cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. Note, flag_int = 1 here always, since now we have to have initialized the interpolator for CLASS p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms } } else{ LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } return p*TWOPI*PI*sigma_norm*sigma_norm; } /* Returns the value of the linear power spectrum of the DM-b relative velocity at kinematic decoupling (which we set at zkin=1010) */ double power_in_vcb(double k){ double p, T, gamma, q, aa, bb, cc; //only works if using CLASS if (user_params_ps->POWER_SPECTRUM == 5){ // CLASS T = TF_CLASS(k, 1, 1); //read from CLASS file. 
flag_int=1 since we have initialized before, flag_vcb=1 for velocity p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else{ LOG_ERROR("Cannot get P_cb unless using CLASS: %i\n Set USE_RELATIVE_VELOCITIES 0 or use CLASS.\n", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } return p*TWOPI*PI*sigma_norm*sigma_norm; } double init_ps(){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); double kstart, kend; //we start the interpolator if using CLASS: if (user_params_ps->POWER_SPECTRUM == 5){ LOG_DEBUG("Setting CLASS Transfer Function inits."); TF_CLASS(1.0, 0, 0); } // Set cuttoff scale for WDM (eq. 4 in Barkana et al. 2001) in comoving Mpc R_CUTOFF = 0.201*pow((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15, 0.15)*pow(global_params.g_x/1.5, -0.29)*pow(global_params.M_WDM, -1.15); omhh = cosmo_params_ps->OMm*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle; theta_cmb = T_cmb / 2.7; // Translate Parameters into forms GLOBALVARIABLES form f_nu = global_params.OMn/cosmo_params_ps->OMm; f_baryon = cosmo_params_ps->OMb/cosmo_params_ps->OMm; if (f_nu < TINY) f_nu = 1e-10; if (f_baryon < TINY) f_baryon = 1e-10; TFset_parameters(); sigma_norm = -1; double Radius_8; Radius_8 = 8.0/cosmo_params_ps->hlittle; if(user_params_ps->POWER_SPECTRUM == 5){ kstart = fmax(1.0e-99/Radius_8, KBOT_CLASS); kend = fmin(350.0/Radius_8, KTOP_CLASS); }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max! 
else{ kstart = 1.0e-99/Radius_8; kend = 350.0/Radius_8; } lower_limit = kstart; upper_limit = kend; LOG_DEBUG("Initializing Power Spectrum with lower_limit=%e, upper_limit=%e, rel_tol=%e, radius_8=%g", lower_limit,upper_limit, rel_tol, Radius_8); F.function = &dsigma_dk; F.params = &Radius_8; int status; gsl_set_error_handler_off(); status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); if(status!=0) { LOG_ERROR("gsl integration error occured!"); LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error); GSL_ERROR(status); } gsl_integration_workspace_free (w); LOG_DEBUG("Initialized Power Spectrum."); sigma_norm = cosmo_params_ps->SIGMA_8/sqrt(result); //takes care of volume factor return R_CUTOFF; } //function to free arrays related to the power spectrum void free_ps(){ //we free the PS interpolator if using CLASS: if (user_params_ps->POWER_SPECTRUM == 5){ TF_CLASS(1.0, -1, 0); } return; } /* FUNCTION dsigmasqdm_z0(M) returns d/dm (sigma^2) (see function sigma), in units of Msun^-1 */ double dsigmasq_dm(double k, void *params){ double p, w, T, gamma, q, aa, bb, cc, dwdr, drdm, kR; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu ApJ, 1999, 511, 5 T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 
2000 transfer function
        if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
        p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
        //p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05
    }
    else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS
        gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
        q = k / (cosmo_params_ps->hlittle*gamma);
        T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
        p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
    }
    else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
        gamma = 0.25;
        aa = 6.4/(cosmo_params_ps->hlittle*gamma);
        bb = 3.0/(cosmo_params_ps->hlittle*gamma);
        cc = 1.7/(cosmo_params_ps->hlittle*gamma);
        p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
    }
    else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626
        gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
        aa = 8.0 / (cosmo_params_ps->hlittle*gamma);
        bb = 4.7 / (cosmo_params_ps->hlittle*gamma);
        p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
    }
    else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
        gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
        aa = 1.7/(cosmo_params_ps->hlittle*gamma);
        bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5);
        cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2);
        p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + pow(bb*k, 1.5) + cc*k*k, 2);
    }
    else if (user_params_ps->POWER_SPECTRUM == 5){ // JBM: CLASS
        T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS
        p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
        if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression
            p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms
        }
    }
    else{
        LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM);
        Throw(ValueError);
    }

    double Radius;
    Radius = *(double *)params;  // smoothing scale in comoving Mpc, passed via GSL params

    // now get the value of the window function
    kR = k * Radius;
    if (global_params.FILTER == 0){ // top hat
        if ( (kR) < 1.0e-4 ){ w = 1.0; }// w converges to 1 as (kR) -> 0
        else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));}

        // now do d(w^2)/dm = 2 w dw/dr dr/dm
        if ( (kR) < 1.0e-10 ){ dwdr = 0;}
        else{ dwdr = 9*cos(kR)*k/pow(kR,3) + 3*sin(kR)*(1 - 3/(kR*kR))/(kR*Radius);}
        //3*k*( 3*cos(kR)/pow(kR,3) + sin(kR)*(-3*pow(kR, -4) + 1/(kR*kR)) );}
        // dwdr = -1e8 * k / (R*1e3);
        drdm = 1.0 / (4.0*PI * cosmo_params_ps->OMm*RHOcrit * Radius*Radius);
    }
    else if (global_params.FILTER == 1){ // gaussian of width 1/R
        w = pow(E, -kR*kR/2.0);
        dwdr = - k*kR * w;
        drdm = 1.0 / (pow(2*PI, 1.5) * cosmo_params_ps->OMm*RHOcrit * 3*Radius*Radius);
    }
    else {
        LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER);
        Throw(ValueError);
    }

    // return k*k*p*2*w*dwdr*drdm * d2fact;
    return k*k*p*2*w*dwdr*drdm;
}

// d(sigma^2)/dM at z=0 for halo mass M (Msun); integrates dsigmasq_dm over k
// and scales by sigma_norm^2 (set in init_ps).
double dsigmasqdm_z0(double M){
    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);
    double kstart, kend;

    double Radius;
    // R = MtoR(M);
    Radius = MtoR(M);

    // now lets do the integral for sigma and scale it with sigma_norm
    if(user_params_ps->POWER_SPECTRUM == 5){
        kstart = fmax(1.0e-99/Radius, KBOT_CLASS);
        kend = fmin(350.0/Radius, KTOP_CLASS);
    }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max!
    else{
        kstart = 1.0e-99/Radius;
        kend = 350.0/Radius;
    }

    lower_limit = kstart;//log(kstart);
    upper_limit = kend;//log(kend);

    // NOTE(review): d2fact is a file-scope global kept for the commented-out
    // renormalisation below — it is written here but not used in the return.
    if (user_params_ps->POWER_SPECTRUM == 5){ // for CLASS we do not need to renormalize the sigma integral.
d2fact=1.0;
    }
    else {
        d2fact = M*10000/sigma_z0(M);
    }

    F.function = &dsigmasq_dm;
    F.params = &Radius;

    int status;
    gsl_set_error_handler_off();
    status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error);

    if(status!=0) {
        LOG_ERROR("gsl integration error occured!");
        LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
        LOG_ERROR("data: M=%e",M);
        GSL_ERROR(status);
    }

    gsl_integration_workspace_free (w);

    // return sigma_norm * sigma_norm * result /d2fact;
    return sigma_norm * sigma_norm * result;
}

/* sheth correction to delta crit */
double sheth_delc(double del, double sig){
    return sqrt(SHETH_a)*del*(1. + global_params.SHETH_b*pow(sig*sig/(SHETH_a*del*del), global_params.SHETH_c));
}

/*
 FUNCTION dNdM_st(z, M)
 Computes the Press_schechter mass function with Sheth-Torman correction for ellipsoidal collapse at
 redshift z, and dark matter halo mass M (in solar masses).
 Uses interpolated sigma and dsigmadm to be computed faster. Necessary for mass-dependent ionising efficiencies.
 The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
 comoving Mpc^-3 Msun^-1
 Reference: Sheth, Mo, Torman 2001
 */
double dNdM_st(double growthf, double M){
    double sigma, dsigmadm, nuhat;
    float MassBinLow;
    int MassBin;

    if(user_params_ps->USE_INTERPOLATION_TABLES) {
        // Linear interpolation in log(M) from the tables built by initialiseSigmaMInterpTable.
        MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(float)MassBin;
        sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
        // Tables store log10(-dsigma^2/dM); undo that here.
        dsigmadm = -pow(10.,dsigmadm);
    }
    else {
        sigma = sigma_z0(M);
        dsigmadm = dsigmasqdm_z0(M);
    }

    // Scale z=0 quantities to redshift z through the growth factor.
    sigma = sigma * growthf;
    dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));

    nuhat = sqrt(SHETH_a) * Deltac / sigma;

    return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * sqrt(2./PI)*SHETH_A * (1+ pow(nuhat, -2*SHETH_p)) * nuhat * pow(E, -nuhat*nuhat/2.0);
}

/*
 FUNCTION dNdM_WatsonFOF(z, M)
 Computes the Press_schechter mass function with Warren et al. 2011 correction for ellipsoidal collapse at
 redshift z, and dark matter halo mass M (in solar masses).
 The Universial FOF function (Eq. 12) of Watson et al. 2013
 The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
 comoving Mpc^-3 Msun^-1
 Reference: Watson et al. 2013
 */
double dNdM_WatsonFOF(double growthf, double M){
    double sigma, dsigmadm, f_sigma;
    float MassBinLow;
    int MassBin;

    if(user_params_ps->USE_INTERPOLATION_TABLES) {
        MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(float)MassBin;
        sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
        dsigmadm = -pow(10.,dsigmadm);
    }
    else {
        sigma = sigma_z0(M);
        dsigmadm = dsigmasqdm_z0(M);
    }

    sigma = sigma * growthf;
    dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));

    f_sigma = Watson_A * ( pow( Watson_beta/sigma, Watson_alpha) + 1. ) * exp( - Watson_gamma/(sigma*sigma) );

    return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma;
}

/*
 FUNCTION dNdM_WatsonFOF_z(z, M)
 Computes the Press_schechter mass function with Warren et al. 2011 correction for ellipsoidal collapse at
 redshift z, and dark matter halo mass M (in solar masses).
 The Universial FOF function, with redshift evolution (Eq. 12 - 15) of Watson et al. 2013.
 The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
 comoving Mpc^-3 Msun^-1
 Reference: Watson et al.
2013
 */
double dNdM_WatsonFOF_z(double z, double growthf, double M){
    double sigma, dsigmadm, A_z, alpha_z, beta_z, Omega_m_z, f_sigma;
    float MassBinLow;
    int MassBin;

    if(user_params_ps->USE_INTERPOLATION_TABLES) {
        MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(float)MassBin;
        sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
        dsigmadm = -pow(10.,dsigmadm);
    }
    else {
        sigma = sigma_z0(M);
        dsigmadm = dsigmasqdm_z0(M);
    }

    sigma = sigma * growthf;
    dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));

    // Redshift-dependent fit coefficients (Watson et al. 2013, Eqs. 12-15).
    Omega_m_z = (cosmo_params_ps->OMm)*pow(1.+z,3.) / ( (cosmo_params_ps->OMl) + (cosmo_params_ps->OMm)*pow(1.+z,3.) + (global_params.OMr)*pow(1.+z,4.) );

    A_z = Omega_m_z * ( Watson_A_z_1 * pow(1. + z, Watson_A_z_2 ) + Watson_A_z_3 );
    alpha_z = Omega_m_z * ( Watson_alpha_z_1 * pow(1.+z, Watson_alpha_z_2 ) + Watson_alpha_z_3 );
    beta_z = Omega_m_z * ( Watson_beta_z_1 * pow(1.+z, Watson_beta_z_2 ) + Watson_beta_z_3 );

    f_sigma = A_z * ( pow(beta_z/sigma, alpha_z) + 1. ) * exp( - Watson_gamma_z/(sigma*sigma) );

    return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma;
}

/*
 FUNCTION dNdM(growthf, M)
 Computes the Press_schechter mass function at redshift z (using the growth factor), and dark matter halo mass M (in solar masses).
 Uses interpolated sigma and dsigmadm to be computed faster. Necessary for mass-dependent ionising efficiencies.
 The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
 comoving Mpc^-3 Msun^-1
 Reference: Padmanabhan, pg. 214
 */
double dNdM(double growthf, double M){
    double sigma, dsigmadm;
    float MassBinLow;
    int MassBin;

    if(user_params_ps->USE_INTERPOLATION_TABLES) {
        MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(float)MassBin;
        sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
        dsigmadm = -pow(10.,dsigmadm);
    }
    else {
        sigma = sigma_z0(M);
        dsigmadm = dsigmasqdm_z0(M);
    }

    sigma = sigma * growthf;
    dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));

    return (-(cosmo_params_ps->OMm)*RHOcrit/M) * sqrt(2/PI) * (Deltac/(sigma*sigma)) * dsigmadm * pow(E, -(Deltac*Deltac)/(2*sigma*sigma));
}

/*
 FUNCTION FgtrM(z, M)
 Computes the fraction of mass contained in haloes with mass > M at redshift z
 */
double FgtrM(double z, double M){
    double del, sig;

    del = Deltac/dicke(z); //regular spherical collapse delta
    sig = sigma_z0(M);

    return splined_erfc(del / (sqrt(2)*sig));
}

/*
 FUNCTION FgtrM_wsigma(z, sigma_z0(M))
 Computes the fraction of mass contained in haloes with mass > M at redshift z.
Requires sigma_z0(M) rather than M to make certain heating integrals faster
 */
double FgtrM_wsigma(double z, double sig){
    double del;

    del = Deltac/dicke(z); //regular spherical collapse delta

    return splined_erfc(del / (sqrt(2)*sig));
}

/*
 FUNCTION FgtrM_Watson(z, M)
 Computes the fraction of mass contained in haloes with mass > M at redshift z
 Uses Watson et al (2013) correction
 */
// GSL integrand: dn/dM * M * M, with the extra M from integrating in ln(M).
double dFdlnM_Watson_z (double lnM, void *params){
    struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params;

    double M = exp(lnM);
    double z = vals.z_obs;
    double growthf = vals.gf_obs;

    return dNdM_WatsonFOF_z(z, growthf, M) * M * M;
}

double FgtrM_Watson_z(double z, double growthf, double M){
    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = 0.001; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

    F.function = &dFdlnM_Watson_z;

    struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = {
        .z_obs = z,
        .gf_obs = growthf,
    };
    F.params = &parameters_gsl_FgtrM;

    lower_limit = log(M);
    upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));

    int status;
    gsl_set_error_handler_off();
    status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

    if(status!=0) {
        LOG_ERROR("gsl integration error occured!");
        LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
        LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M);
        GSL_ERROR(status);
    }

    gsl_integration_workspace_free (w);

    return result / (cosmo_params_ps->OMm*RHOcrit);
}

/*
 FUNCTION FgtrM_Watson(z, M)
 Computes the fraction of mass contained in haloes with mass > M at redshift z
 Uses Watson et al (2013) correction
 */
double dFdlnM_Watson (double lnM, void *params){
    double growthf = *(double *)params;
    double M = exp(lnM);

    return dNdM_WatsonFOF(growthf, M) * M * M;
}

double FgtrM_Watson(double growthf, double M){
    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = 0.001; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

    F.function = &dFdlnM_Watson;
    F.params = &growthf;

    lower_limit = log(M);
    upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));

    int status;
    gsl_set_error_handler_off();
    status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

    if(status!=0) {
        LOG_ERROR("gsl integration error occured!");
        LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
        LOG_ERROR("data: growthf=%e M=%e",growthf,M);
        GSL_ERROR(status);
    }

    gsl_integration_workspace_free (w);

    return result / (cosmo_params_ps->OMm*RHOcrit);
}

// GSL integrand selecting the halo mass function by user_params_ps->HMF.
// NOTE(review): MassFunction is read uninitialized if HMF is outside 0..3;
// the visible caller (FgtrM_General) guards with HMF<4 && HMF>-1 — confirm all
// callers do the same.
double dFdlnM_General(double lnM, void *params){
    struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params;

    double M = exp(lnM);
    double z = vals.z_obs;
    double growthf = vals.gf_obs;

    double MassFunction;

    if(user_params_ps->HMF==0) {
        MassFunction = dNdM(growthf, M);
    }
    if(user_params_ps->HMF==1) {
        MassFunction = dNdM_st(growthf, M);
    }
    if(user_params_ps->HMF==2) {
        MassFunction = dNdM_WatsonFOF(growthf, M);
    }
    if(user_params_ps->HMF==3) {
        MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
    }

    return MassFunction * M * M;
}

/*
 FUNCTION FgtrM_General(z, M)
 Computes the fraction of mass contained in haloes with mass > M at redshift z
 */
double FgtrM_General(double z, double M){
    double del, sig, growthf;
    int status;

    growthf = dicke(z);

    struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = {
        .z_obs = z,
        .gf_obs = growthf,
    };

    if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
        double result, error, lower_limit, upper_limit;
        gsl_function F;
        double rel_tol = 0.001; //<- relative tolerance
        gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

        F.function = &dFdlnM_General;
        F.params = &parameters_gsl_FgtrM;

        lower_limit = log(M);
        upper_limit =
log(fmax(global_params.M_MAX_INTEGRAL, M*100));

        gsl_set_error_handler_off();
        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        return result / (cosmo_params_ps->OMm*RHOcrit);
    }
    else {
        LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
        Throw(ValueError);
    }
}

// GSL integrand for Nion_General: dn/dM weighted by the escape fraction,
// stellar fraction and a low-mass turnover exp(-MassTurnover/M).
// NOTE(review): MassFunction is read uninitialized if HMF is outside 0..3;
// Nion_General guards before integrating — confirm all callers do the same.
double dNion_General(double lnM, void *params){
    struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params;

    double M = exp(lnM);
    double z = vals.z_obs;
    double growthf = vals.gf_obs;
    double MassTurnover = vals.Mdrop;
    double Alpha_star = vals.pl_star;
    double Alpha_esc = vals.pl_esc;
    double Fstar10 = vals.frac_star;
    double Fesc10 = vals.frac_esc;
    double Mlim_Fstar = vals.LimitMass_Fstar;
    double Mlim_Fesc = vals.LimitMass_Fesc;

    double Fstar, Fesc, MassFunction;

    // Power-law f_* and f_esc, clipped at the mass limits so the fractions stay <= 1.
    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar10;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1/Fstar10;
    else
        Fstar = pow(M/1e10,Alpha_star);

    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc10;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc10;
    else
        Fesc = pow(M/1e10,Alpha_esc);

    if(user_params_ps->HMF==0) {
        MassFunction = dNdM(growthf, M);
    }
    if(user_params_ps->HMF==1) {
        MassFunction = dNdM_st(growthf,M);
    }
    if(user_params_ps->HMF==2) {
        MassFunction = dNdM_WatsonFOF(growthf, M);
    }
    if(user_params_ps->HMF==3) {
        MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
    }

    return MassFunction * M * M * exp(-MassTurnover/M) * Fstar * Fesc;
}

double Nion_General(double z, double M_Min, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc){
    double growthf;

    growthf = dicke(z);

    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = 0.001; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

    struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = {
        .z_obs = z,
        .gf_obs = growthf,
        .Mdrop = MassTurnover,
        .pl_star = Alpha_star,
        .pl_esc = Alpha_esc,
        .frac_star = Fstar10,
        .frac_esc = Fesc10,
        .LimitMass_Fstar = Mlim_Fstar,
        .LimitMass_Fesc = Mlim_Fesc,
    };
    int status;

    if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
        F.function = &dNion_General;
        F.params = &parameters_gsl_SFR;

        lower_limit = log(M_Min);
        upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M_Min*100));

        gsl_set_error_handler_off();
        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: z=%e growthf=%e MassTurnover=%e Alpha_star=%e Alpha_esc=%e",z,growthf,MassTurnover,Alpha_star,Alpha_esc);
            LOG_ERROR("data: Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        return result / ((cosmo_params_ps->OMm)*RHOcrit);
    }
    else {
LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
        Throw(ValueError);
    }
}

// Mini-halo variant of dNion_General: fractions pivot at 1e7 Msun and an
// additional upper turnover exp(-M/MassTurnover_upper) is applied.
// NOTE(review): MassFunction is read uninitialized if HMF is outside 0..3;
// Nion_General_MINI guards before integrating — confirm all callers do the same.
double dNion_General_MINI(double lnM, void *params){
    struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params;

    double M = exp(lnM);
    double z = vals.z_obs;
    double growthf = vals.gf_obs;
    double MassTurnover = vals.Mdrop;
    double MassTurnover_upper = vals.Mdrop_upper;
    double Alpha_star = vals.pl_star;
    double Alpha_esc = vals.pl_esc;
    double Fstar7_MINI = vals.frac_star;
    double Fesc7_MINI = vals.frac_esc;
    double Mlim_Fstar = vals.LimitMass_Fstar;
    double Mlim_Fesc = vals.LimitMass_Fesc;

    double Fstar, Fesc, MassFunction;

    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar7_MINI;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1/Fstar7_MINI;
    else
        Fstar = pow(M/1e7,Alpha_star);

    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else
        Fesc = pow(M/1e7,Alpha_esc);

    if(user_params_ps->HMF==0) {
        MassFunction = dNdM(growthf, M);
    }
    if(user_params_ps->HMF==1) {
        MassFunction = dNdM_st(growthf,M);
    }
    if(user_params_ps->HMF==2) {
        MassFunction = dNdM_WatsonFOF(growthf, M);
    }
    if(user_params_ps->HMF==3) {
        MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
    }

    return MassFunction * M * M * exp(-MassTurnover/M) * exp(-M/MassTurnover_upper) * Fstar * Fesc;
}

double Nion_General_MINI(double z, double M_Min, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar7_MINI, double Fesc7_MINI, double Mlim_Fstar, double Mlim_Fesc){
    double growthf;
    int status;

    growthf = dicke(z);

    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = 0.001; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

    struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = {
        .z_obs = z,
        .gf_obs = growthf,
        .Mdrop = MassTurnover,
        .Mdrop_upper = MassTurnover_upper,
        .pl_star = Alpha_star,
        .pl_esc = Alpha_esc,
        .frac_star = Fstar7_MINI,
        .frac_esc = Fesc7_MINI,
        .LimitMass_Fstar = Mlim_Fstar,
        .LimitMass_Fesc = Mlim_Fesc,
    };

    if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
        F.function = &dNion_General_MINI;
        F.params = &parameters_gsl_SFR;

        lower_limit = log(M_Min);
        upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M_Min*100));

        gsl_set_error_handler_off();
        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occurred!");
            LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: z=%e growthf=%e MassTurnover=%e MassTurnover_upper=%e",z,growthf,MassTurnover,MassTurnover_upper);
            LOG_ERROR("data: Alpha_star=%e Alpha_esc=%e Fstar7_MINI=%e Fesc7_MINI=%e Mlim_Fstar=%e Mlim_Fesc=%e",Alpha_star,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar,Mlim_Fesc);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        return result / ((cosmo_params_ps->OMm)*RHOcrit);
    }
    else {
        LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
        Throw(ValueError);
    }
}

/* returns the "effective Jeans mass" in Msun
 corresponding to the gas analog of WDM ; eq. 10 in Barkana+ 2001 */
double M_J_WDM(){
    double z_eq, fudge=60;
    if (!(global_params.P_CUTOFF))
        return 0;  // no WDM cutoff requested -> no effective Jeans mass
    z_eq = 3600*(cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15;
    return fudge*3.06e8 * (1.5/global_params.g_x) * sqrt((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15) * pow(global_params.M_WDM, -4) * pow(z_eq/3000.0, 1.5);
}

// Single-precision complementary error function via a Chebyshev-style
// rational approximation (Numerical Recipes-style erfcc).
float erfcc(float x)
{
    double t,q,ans;

    q=fabs(x);
    t=1.0/(1.0+0.5*q);
    ans=t*exp(-q*q-1.2655122+t*(1.0000237+t*(0.374092+t*(0.0967842+
                                                         t*(-0.1862881+t*(0.2788681+t*(-1.13520398+t*(1.4885159+
                                                                                                      t*(-0.82215223+t*0.17087277)))))))));
    return x >= 0.0 ?
ans : 2.0-ans;
}

// erfc with a guard for negative arguments; currently always uses erfcc.
double splined_erfc(double x){
    if (x < 0){
        return 1.0;
    }

    // TODO: This could be wrapped in a Try/Catch to try the fast way and if it doesn't
    // work, use the slow way.

    return erfcc(x); // the interpolation below doesn't seem to be stable in Ts.c
    // NOTE(review): the spline path below is intentionally unreachable (see comment above).
    if (x > ERFC_PARAM_DELTA*(ERFC_NPTS-1))
        return erfcc(x);
    else
        return exp(gsl_spline_eval(erfc_spline, x, erfc_acc));
}

void gauleg(float x1, float x2, float x[], float w[], int n)
//Given the lower and upper limits of integration x1 and x2, and given n, this routine returns arrays x[1..n] and w[1..n] of length n,
//containing the abscissas and weights of the Gauss- Legendre n-point quadrature formula.
{
    int m,j,i;
    double z1,z,xm,xl,pp,p3,p2,p1;

    m=(n+1)/2;
    xm=0.5*(x2+x1);
    xl=0.5*(x2-x1);
    for (i=1;i<=m;i++) {
        //High precision is a good idea for this routine.
        //The roots are symmetric in the interval, so we only have to find half of them.
        //Loop over the desired roots.

        z=cos(3.141592654*(i-0.25)/(n+0.5));

        //Starting with the above approximation to the ith root, we enter the main loop of refinement by Newton’s method.
        do {
            p1=1.0;
            p2=0.0;
            for (j=1;j<=n;j++) {
                //Loop up the recurrence relation to get the Legendre polynomial evaluated at z.
                p3=p2;
                p2=p1;
                p1=((2.0*j-1.0)*z*p2-(j-1.0)*p3)/j;
            }
            //p1 is now the desired Legendre polynomial. We next compute pp, its derivative, by a standard relation involving also p2,
            //the polynomial of one lower order.
            pp=n*(z*p1-p2)/(z*z-1.0);
            z1=z;
            z=z1-p1/pp;
        } while (fabs(z-z1) > EPS2);
        x[i]=xm-xl*z;
        x[n+1-i]=xm+xl*z;
        w[i]=2.0*xl/((1.0-z*z)*pp*pp);
        w[n+1-i]=w[i];
    }
}

// Build the log(M) -> sigma, log10(-dsigma^2/dM) lookup tables (NMass entries)
// used by the dNdM_* functions. Also sets MinMass, mass_bin_width and
// inv_mass_bin_width. Filling is parallelised over OpenMP threads.
void initialiseSigmaMInterpTable(float M_Min, float M_Max)
{
    int i;
    float Mass;  // NOTE(review): unused local

    if (Mass_InterpTable == NULL){
        Mass_InterpTable = calloc(NMass,sizeof(float));
        Sigma_InterpTable = calloc(NMass,sizeof(float));
        dSigmadm_InterpTable = calloc(NMass,sizeof(float));
    }

#pragma omp parallel shared(Mass_InterpTable,Sigma_InterpTable,dSigmadm_InterpTable) private(i) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for(i=0;i<NMass;i++) {
            Mass_InterpTable[i] = log(M_Min) + (float)i/(NMass-1)*( log(M_Max) - log(M_Min) );
            Sigma_InterpTable[i] = sigma_z0(exp(Mass_InterpTable[i]));
            dSigmadm_InterpTable[i] = log10(-dsigmasqdm_z0(exp(Mass_InterpTable[i])));
        }
    }

    for(i=0;i<NMass;i++) {
        if(isfinite(Mass_InterpTable[i]) == 0 || isfinite(Sigma_InterpTable[i]) == 0 || isfinite(dSigmadm_InterpTable[i])==0) {
            LOG_ERROR("Detected either an infinite or NaN value in initialiseSigmaMInterpTable");
//            Throw(ParameterError);
            Throw(TableGenerationError);
        }
    }

    MinMass = log(M_Min);
    mass_bin_width = 1./(NMass-1)*( log(M_Max) - log(M_Min) );
    inv_mass_bin_width = 1./mass_bin_width;
}

void freeSigmaMInterpTable()
{
    free(Mass_InterpTable);
    free(Sigma_InterpTable);
    free(dSigmadm_InterpTable);
    // Only Mass_InterpTable is used as the "allocated?" sentinel in
    // initialiseSigmaMInterpTable, so only it is reset here.
    Mass_InterpTable = NULL;
}

// Numerical Recipes-style fatal error handler: log and throw.
void nrerror(char error_text[])
{
    LOG_ERROR("Numerical Recipes run-time error...");
    LOG_ERROR("%s",error_text);
    Throw(MemoryAllocError);
}

float *vector(long nl, long nh)
/* allocate a float vector with subscript range v[nl..nh] */
{
    float *v;
    v = (float *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(float)));
    if(!v) nrerror("allocation failure in vector()");
    return v - nl + NR_END;
}

void free_vector(float *v, long nl, long nh)
/* free a float vector allocated with vector() */
{
    free((FREE_ARG) (v+nl-NR_END));
}

void spline(float x[], float y[], int n, float yp1, float ypn, float y2[])
/*Given arrays x[1..n] and y[1..n] containing a
tabulated function, i.e., yi = f(xi), with x1 <x2 < :: : < xN, and given values yp1 and ypn for the first derivative of the interpolating function at points 1 and n, respectively, this routine returns an array y2[1..n] that contains the second derivatives of the interpolating function at the tabulated points xi. If yp1 and/or ypn are equal to 1e30 or larger, the routine is signaled to set the corresponding boundary condition for a natural spline, with zero second derivative on that boundary.*/
{
    int i,k;
    float p,qn,sig,un,*u;
    int na,nb,check;
    u=vector(1,n-1);
    if (yp1 > 0.99e30)                     // The lower boundary condition is set either to be "natural"
        y2[1]=u[1]=0.0;
    else {                                 // or else to have a specified first derivative.
        y2[1] = -0.5;
        u[1]=(3.0/(x[2]-x[1]))*((y[2]-y[1])/(x[2]-x[1])-yp1);
    }
    for (i=2;i<=n-1;i++) {
        //This is the decomposition loop of the tridiagonal algorithm.
        sig=(x[i]-x[i-1])/(x[i+1]-x[i-1]);
        //y2 and u are used for temporary
        // NOTE(review): the na/nb widening below skips over duplicate abscissa
        // values to avoid a zero denominator — nonstandard addition to the NR routine.
        na = 1;
        nb = 1;
        check = 0;
        while(((float)(x[i+na*1]-x[i-nb*1])==(float)0.0)) {
            check = check + 1;
            if(check%2==0) {
                na = na + 1;
            }
            else {
                nb = nb + 1;
            }
            sig=(x[i]-x[i-1])/(x[i+na*1]-x[i-nb*1]);
        }
        p=sig*y2[i-1]+2.0;
        //storage of the decomposed
        y2[i]=(sig-1.0)/p;
        // factors.
        u[i]=(y[i+1]-y[i])/(x[i+1]-x[i]) - (y[i]-y[i-1])/(x[i]-x[i-1]);
        u[i]=(6.0*u[i]/(x[i+1]-x[i-1])-sig*u[i-1])/p;

        if(((float)(x[i+1]-x[i])==(float)0.0) || ((float)(x[i]-x[i-1])==(float)0.0)) {
            na = 0;
            nb = 0;
            check = 0;
            while((float)(x[i+na*1]-x[i-nb])==(float)(0.0) || ((float)(x[i+na]-x[i-nb*1])==(float)0.0)) {
                check = check + 1;
                if(check%2==0) {
                    na = na + 1;
                }
                else {
                    nb = nb + 1;
                }
            }
            u[i]=(y[i+1]-y[i])/(x[i+na*1]-x[i-nb]) - (y[i]-y[i-1])/(x[i+na]-x[i-nb*1]);
            u[i]=(6.0*u[i]/(x[i+na*1]-x[i-nb*1])-sig*u[i-1])/p;
        }
    }
    if (ypn > 0.99e30)                     //The upper boundary condition is set either to be "natural"
        qn=un=0.0;
    else {                                 //or else to have a specified first derivative.
        qn=0.5;
        un=(3.0/(x[n]-x[n-1]))*(ypn-(y[n]-y[n-1])/(x[n]-x[n-1]));
    }
    y2[n]=(un-qn*u[n-1])/(qn*y2[n-1]+1.0);

    for (k=n-1;k>=1;k--) {
        //This is the backsubstitution loop of the tridiagonal
        y2[k]=y2[k]*y2[k+1]+u[k];
        //algorithm.
    }
    free_vector(u,1,n-1);
}

void splint(float xa[], float ya[], float y2a[], int n, float x, float *y)
/*Given the arrays xa[1..n] and ya[1..n], which tabulate a function (with the xai's in order), and given the array y2a[1..n], which is the output from spline above, and given a value of x, this routine returns a cubic-spline interpolated value y.*/
{
    void nrerror(char error_text[]);
    int klo,khi,k;
    float h,b,a;
    klo=1;                                 // We will find the right place in the table by means of
    khi=n;                                 //bisection. This is optimal if sequential calls to this
    while (khi-klo > 1) {                  //routine are at random values of x. If sequential calls
        k=(khi+klo) >> 1;                  //are in order, and closely spaced, one would do better
        if (xa[k] > x) khi=k;              //to store previous values of klo and khi and test if
        else klo=k;                        //they remain appropriate on the next call.
    }                                      // klo and khi now bracket the input value of x.
    h=xa[khi]-xa[klo];
    if (h == 0.0) nrerror("Bad xa input to routine splint");   //The xa's must be distinct.
    a=(xa[khi]-x)/h;
    b=(x-xa[klo])/h;                       //Cubic spline polynomial is now evaluated.
*y=a*ya[klo]+b*ya[khi]+((a*a*a-a)*y2a[klo]+(b*b*b-b)*y2a[khi])*(h*h)/6.0;
}

unsigned long *lvector(long nl, long nh)
/* allocate an unsigned long vector with subscript range v[nl..nh] */
{
    unsigned long *v;
    v = (unsigned long *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(long)));
    if(!v) nrerror("allocation failure in lvector()");
    return v - nl + NR_END;
}

void free_lvector(unsigned long *v, long nl, long nh)
/* free an unsigned long vector allocated with lvector() */
{
    free((FREE_ARG) (v+nl-NR_END));
}

/* dnbiasdM */
// Conditional mass function: number density of halos of mass M inside a region
// of mass M_o with linear overdensity del_o. Requires M < M_o and
// del_o < Deltac/dicke(z); otherwise throws ValueError.
double dnbiasdM(double M, float z, double M_o, float del_o){
    double sigsq, del, sig_one, sig_o;

    if ((M_o-M) < TINY){
        LOG_ERROR("In function dnbiasdM: M must be less than M_o!\nAborting...\n");
        Throw(ValueError);
    }
    del = Deltac/dicke(z) - del_o;
    if (del < 0){
        LOG_ERROR(" In function dnbiasdM: del_o must be less than del_1 = del_crit/dicke(z)!\nAborting...\n");
        Throw(ValueError);
    }
    sig_o = sigma_z0(M_o);
    sig_one = sigma_z0(M);
    sigsq = sig_one*sig_one - sig_o*sig_o;
    return -(RHOcrit*cosmo_params_ps->OMm)/M /sqrt(2*PI) *del*pow(sigsq,-1.5)*pow(E, -0.5*del*del/sigsq)*dsigmasqdm_z0(M);
}

/* calculates the fraction of mass contained in haloes with mass > M at redshift z, in regions with a linear overdensity of del_bias, and standard deviation sig_bias */
double FgtrM_bias(double z, double M, double del_bias, double sig_bias){
    double del, sig, sigsmallR;

    sigsmallR = sigma_z0(M);

    if (!(sig_bias < sigsmallR)){ // biased region is smaller that halo!
//        fprintf(stderr, "FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n");
//        return 0;
        // Return a small sentinel instead of zero so downstream ratios stay finite.
        return 0.000001;
    }

    del = Deltac/dicke(z) - del_bias;
    sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias);

    return splined_erfc(del / (sqrt(2)*sig));
}

/* Uses sigma parameters instead of Mass for scale */
double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias){
    double del, sig;

    if (!(sig_bias < sigsmallR)){ // biased region is smaller that halo!
//        fprintf(stderr, "local_FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n");
//        return 0;
        return 0.000001;
    }

    del = Deltac/dicke(z) - del_bias;
    sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias);

    return splined_erfc(del / (sqrt(2)*sig));
}

/* redshift derivative of the growth function at z */
// Forward finite difference with step dz = 1e-10.
double ddicke_dz(double z){
    float dz = 1e-10;
    // NOTE(review): the locals below are unused; the derivative is computed
    // directly by forward difference.
    double omegaM_z, ddickdz, dick_0, x, x_0, domegaMdz;

    return (dicke(z+dz)-dicke(z))/dz;
}

/* compute a mass limit where the stellar baryon fraction and the escape fraction exceed unity */
// Evaluates FRAC * (10^logM / 1e10)^PL; used as the target function of the bisection below.
float Mass_limit (float logM, float PL, float FRAC) {
    return FRAC*pow(pow(10.,logM)/1e10,PL);
}

// One bisection step: midpoint of [xlow, xup], incrementing the iteration counter.
void bisection(float *x, float xlow, float xup, int *iter){
    *x=(xlow + xup)/2.;
    ++(*iter);
}

// Bisect in log10(M) for the mass at which Mass_limit(...) crosses 1,
// i.e. where the power-law fraction would exceed unity. Returns Mmin/Mmax
// directly when the fraction never reaches 1 inside the bracket, or 0 for PL == 0.
float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC){
    int i, iter, max_iter=200;
    float rel_tol=0.001;
    float logMlow, logMupper, x, x1;

    iter = 0;
    logMlow = log10(Mmin);
    logMupper = log10(Mmax);

    if (PL < 0.) {
        if (Mass_limit(logMlow,PL,FRAC) <= 1.) {
            return Mmin;
        }
    }
    else if (PL > 0.) {
        if (Mass_limit(logMupper,PL,FRAC) <= 1.) {
            return Mmax;
        }
    }
    else
        return 0;

    bisection(&x, logMlow, logMupper, &iter);
    do {
        if((Mass_limit(logMlow,PL,FRAC)-1.)*(Mass_limit(x,PL,FRAC)-1.) < 0.)
            logMupper = x;
        else
            logMlow = x;
        bisection(&x1, logMlow, logMupper, &iter);
        if(fabs(x1-x) < rel_tol) {
            return pow(10.,x1);
        }
        x = x1;
    }
    while(iter < max_iter);

    // Got to max_iter without finding a solution.
LOG_ERROR("Failed to find a mass limit to regulate stellar fraction/escape fraction is between 0 and 1."); LOG_ERROR(" The solution does not converge or iterations are not sufficient."); // Throw(ParameterError); Throw(MassDepZetaError); return(0.0); } int initialise_ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options) { Broadcast_struct_global_PS(user_params,cosmo_params); Broadcast_struct_global_UF(user_params,cosmo_params); lnMhalo_param = calloc(nbins,sizeof(double)); Muv_param = calloc(nbins,sizeof(double)); Mhalo_param = calloc(nbins,sizeof(double)); LF_spline_acc = gsl_interp_accel_alloc(); LF_spline = gsl_spline_alloc(gsl_interp_cspline, nbins); init_ps(); int status; Try initialiseSigmaMInterpTable(0.999*Mhalo_min,1.001*Mhalo_max); Catch(status) { LOG_ERROR("\t...called from initialise_ComputeLF"); return(status); } initialised_ComputeLF = true; return(0); } void cleanup_ComputeLF(){ free(lnMhalo_param); free(Muv_param); free(Mhalo_param); gsl_spline_free (LF_spline); gsl_interp_accel_free(LF_spline_acc); freeSigmaMInterpTable(); initialised_ComputeLF = 0; } int ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options, int component, int NUM_OF_REDSHIFT_FOR_LF, float *z_LF, float *M_TURNs, double *M_uv_z, double *M_h_z, double *log10phi) { /* This is an API-level function and thus returns an int status. */ int status; Try{ // This try block covers the whole function. // This NEEDS to be done every time, because the actual object passed in as // user_params, cosmo_params etc. can change on each call, freeing up the memory. 
initialise_ComputeLF(nbins, user_params,cosmo_params,astro_params,flag_options); int i,i_z; int i_unity, i_smth, mf, nbins_smth=7; double dlnMhalo, lnMhalo_i, SFRparam, Muv_1, Muv_2, dMuvdMhalo; double Mhalo_i, lnMhalo_min, lnMhalo_max, lnMhalo_lo, lnMhalo_hi, dlnM, growthf; double f_duty_upper, Mcrit_atom; float Fstar, Fstar_temp; double dndm; int gsl_status; gsl_set_error_handler_off(); if (astro_params->ALPHA_STAR < -0.5) LOG_WARNING( "ALPHA_STAR is %f, which is unphysical value given the observational LFs.\n"\ "Also, when ALPHA_STAR < -.5, LFs may show a kink. It is recommended to set ALPHA_STAR > -0.5.", astro_params->ALPHA_STAR ); mf = user_params_ps->HMF; lnMhalo_min = log(Mhalo_min*0.999); lnMhalo_max = log(Mhalo_max*1.001); dlnMhalo = (lnMhalo_max - lnMhalo_min)/(double)(nbins - 1); for (i_z=0; i_z<NUM_OF_REDSHIFT_FOR_LF; i_z++) { growthf = dicke(z_LF[i_z]); Mcrit_atom = atomic_cooling_threshold(z_LF[i_z]); i_unity = -1; for (i=0; i<nbins; i++) { // generate interpolation arrays lnMhalo_param[i] = lnMhalo_min + dlnMhalo*(double)i; Mhalo_i = exp(lnMhalo_param[i]); if (component == 1) Fstar = astro_params->F_STAR10*pow(Mhalo_i/1e10,astro_params->ALPHA_STAR); else Fstar = astro_params->F_STAR7_MINI*pow(Mhalo_i/1e7,astro_params->ALPHA_STAR_MINI); if (Fstar > 1.) Fstar = 1; if (i_unity < 0) { // Find the array number at which Fstar crosses unity. if (astro_params->ALPHA_STAR > 0.) { if ( (1.- Fstar) < FRACT_FLOAT_ERR ) i_unity = i; } else if (astro_params->ALPHA_STAR < 0. && i < nbins-1) { if (component == 1) Fstar_temp = astro_params->F_STAR10*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e10,astro_params->ALPHA_STAR); else Fstar_temp = astro_params->F_STAR7_MINI*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e7,astro_params->ALPHA_STAR_MINI); if (Fstar_temp < 1. 
&& (1.- Fstar) < FRACT_FLOAT_ERR) i_unity = i; } } // parametrization of SFR SFRparam = Mhalo_i * cosmo_params->OMb/cosmo_params->OMm * (double)Fstar * (double)(hubble(z_LF[i_z])*SperYR/astro_params->t_STAR); // units of M_solar/year Muv_param[i] = 51.63 - 2.5*log10(SFRparam*Luv_over_SFR); // UV magnitude // except if Muv value is nan or inf, but avoid error put the value as 10. if ( isinf(Muv_param[i]) || isnan(Muv_param[i]) ) Muv_param[i] = 10.; M_uv_z[i + i_z*nbins] = Muv_param[i]; } gsl_status = gsl_spline_init(LF_spline, lnMhalo_param, Muv_param, nbins); GSL_ERROR(gsl_status); lnMhalo_lo = log(Mhalo_min); lnMhalo_hi = log(Mhalo_max); dlnM = (lnMhalo_hi - lnMhalo_lo)/(double)(nbins - 1); // There is a kink on LFs at which Fstar crosses unity. This kink is a numerical artefact caused by the derivate of dMuvdMhalo. // Most of the cases the kink doesn't appear in magnitude ranges we are interested (e.g. -22 < Muv < -10). However, for some extreme // parameters, it appears. To avoid this kink, we use the interpolation of the derivate in the range where the kink appears. // 'i_unity' is the array number at which the kink appears. 'i_unity-3' and 'i_unity+12' are related to the range of interpolation, // which is an arbitrary choice. // NOTE: This method does NOT work in cases with ALPHA_STAR < -0.5. But, this parameter range is unphysical given that the // observational LFs favour positive ALPHA_STAR in this model. // i_smth = 0: calculates LFs without interpolation. // i_smth = 1: calculates LFs using interpolation where Fstar crosses unity. 
if (i_unity-3 < 0) i_smth = 0; else if (i_unity+12 > nbins-1) i_smth = 0; else i_smth = 1; if (i_smth == 0) { for (i=0; i<nbins; i++) { // calculate luminosity function lnMhalo_i = lnMhalo_lo + dlnM*(double)i; Mhalo_param[i] = exp(lnMhalo_i); M_h_z[i + i_z*nbins] = Mhalo_param[i]; Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc); Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc); dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i)); if (component == 1) f_duty_upper = 1.; else f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom)); if(mf==0) { log10phi[i + i_z*nbins] = log10( dNdM(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) ); } else if(mf==1) { log10phi[i + i_z*nbins] = log10( dNdM_st(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) ); } else if(mf==2) { log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) ); } else if(mf==3) { log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF_z(z_LF[i_z], growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) ); } else{ LOG_ERROR("HMF should be between 0-3, got %d", mf); Throw(ValueError); } if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.) 
log10phi[i + i_z*nbins] = -30.; } } else { lnM_temp = calloc(nbins_smth,sizeof(double)); deriv_temp = calloc(nbins_smth,sizeof(double)); deriv = calloc(nbins,sizeof(double)); for (i=0; i<nbins; i++) { // calculate luminosity function lnMhalo_i = lnMhalo_lo + dlnM*(double)i; Mhalo_param[i] = exp(lnMhalo_i); M_h_z[i + i_z*nbins] = Mhalo_param[i]; Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc); Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc); dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i)); deriv[i] = fabs(dMuvdMhalo); } deriv_spline_acc = gsl_interp_accel_alloc(); deriv_spline = gsl_spline_alloc(gsl_interp_cspline, nbins_smth); // generate interpolation arrays to smooth discontinuity of the derivative causing a kink // Note that the number of array elements and the range of interpolation are made by arbitrary choices. lnM_temp[0] = lnMhalo_param[i_unity - 3]; lnM_temp[1] = lnMhalo_param[i_unity - 2]; lnM_temp[2] = lnMhalo_param[i_unity + 8]; lnM_temp[3] = lnMhalo_param[i_unity + 9]; lnM_temp[4] = lnMhalo_param[i_unity + 10]; lnM_temp[5] = lnMhalo_param[i_unity + 11]; lnM_temp[6] = lnMhalo_param[i_unity + 12]; deriv_temp[0] = deriv[i_unity - 3]; deriv_temp[1] = deriv[i_unity - 2]; deriv_temp[2] = deriv[i_unity + 8]; deriv_temp[3] = deriv[i_unity + 9]; deriv_temp[4] = deriv[i_unity + 10]; deriv_temp[5] = deriv[i_unity + 11]; deriv_temp[6] = deriv[i_unity + 12]; gsl_status = gsl_spline_init(deriv_spline, lnM_temp, deriv_temp, nbins_smth); GSL_ERROR(gsl_status); for (i=0;i<9;i++){ deriv[i_unity + i - 1] = gsl_spline_eval(deriv_spline, lnMhalo_param[i_unity + i - 1], deriv_spline_acc); } for (i=0; i<nbins; i++) { if (component == 1) f_duty_upper = 1.; else f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom)); if(mf==0) dndm = dNdM(growthf, Mhalo_param[i]); else if(mf==1) dndm = dNdM_st(growthf, Mhalo_param[i]); else if(mf==2) dndm = dNdM_WatsonFOF(growthf, Mhalo_param[i]); else if(mf==3) dndm = 
dNdM_WatsonFOF_z(z_LF[i_z], growthf, Mhalo_param[i]); else{ LOG_ERROR("HMF should be between 0-3, got %d", mf); Throw(ValueError); } log10phi[i + i_z*nbins] = log10(dndm * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / deriv[i]); if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.) log10phi[i + i_z*nbins] = -30.; } } } cleanup_ComputeLF(); } // End try Catch(status){ return status; } return(0); } void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max){ //calculates the weightings and the positions for Gauss-Legendre quadrature. gauleg(log(M_Min),log(M_Max),xi_SFR_Xray,wi_SFR_Xray,n); } float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2){ float sigma1, dsigmadm,dsigma_val; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (M1 - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma1 = Sigma_InterpTable[MassBin] + ( M1 - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigma_val = dSigmadm_InterpTable[MassBin] + ( M1 - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigma_val); } else { sigma1 = sigma_z0(exp(M1)); dsigmadm = dsigmasqdm_z0(exp(M1)); } M1 = exp(M1); M2 = exp(M2); sigma1 = sigma1*sigma1; sigma2 = sigma2*sigma2; dsigmadm = dsigmadm/(2.0*sigma1); // This is actually sigma1^{2} as calculated above, however, it should just be sigma1. It cancels with the same factor below. Why I have decided to write it like that I don't know! 
if((sigma1 > sigma2)) {
        // standard EPS conditional mass function (variances already squared above)
        return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( sigma1 - sigma2 ) ) ) )/(pow( sigma1 - sigma2, 1.5));
    }
    else if(sigma1==sigma2) {
        // degenerate case: regularise the vanishing variance difference with 1e-6
        return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( 1.e-6 ) ) ) )/(pow( 1.e-6, 1.5));
    }
    else {
        // sigma1 < sigma2: mass scale larger than the conditioning region -> no haloes
        return 0.;
    }
}

void initialiseGL_Nion(int n, float M_Min, float M_Max){
    //calculates the weightings and the positions for Gauss-Legendre quadrature.
    gauleg(log(M_Min),log(M_Max),xi_SFR,wi_SFR,n);
}

/* GSL integrand (in lnM) for the conditional ionising-emissivity of minihaloes:
   conditional HMF weighted by mass, stellar and escape fractions, with a lower
   (MassTurnover) and an upper (MassTurnover_upper) exponential mass cutoff.
   Normalisations are anchored at 1e7 Msun (F*7 / Fesc7). */
double dNion_ConditionallnM_MINI(double lnM, void *params) {
    struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params;
    double M = exp(lnM); // linear scale
    double growthf = vals.gf_obs;
    double M2 = vals.Mval; // natural log scale
    double sigma2 = vals.sigma2;
    double del1 = vals.delta1;
    double del2 = vals.delta2;
    double MassTurnover = vals.Mdrop;
    double MassTurnover_upper = vals.Mdrop_upper;
    double Alpha_star = vals.pl_star;
    double Alpha_esc = vals.pl_esc;
    double Fstar7_MINI = vals.frac_star;
    double Fesc7_MINI = vals.frac_esc;
    double Mlim_Fstar = vals.LimitMass_Fstar;
    double Mlim_Fesc = vals.LimitMass_Fesc;

    double Fstar,Fesc;

    // Above/below the limiting mass the power-law fraction would exceed unity;
    // clamp by taking 1/F so that F_norm*Fstar == 1 there.
    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar7_MINI;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1./Fstar7_MINI;
    else
        Fstar = pow(M/1e7,Alpha_star);

    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else
        Fesc = pow(M/1e7,Alpha_esc);

    return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}

/* GSL integrand (in lnM) for the conditional ionising-emissivity of atomic
   haloes: as above but with a single lower mass cutoff and normalisations
   anchored at 1e10 Msun (F*10 / Fesc10). */
double dNion_ConditionallnM(double lnM, void *params) {
    struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params;
    double M = exp(lnM); // linear scale
    double growthf = vals.gf_obs;
    double M2 = vals.Mval; // natural log scale
    double sigma2 = vals.sigma2;
    double del1 = vals.delta1;
    double del2 = vals.delta2;
    double MassTurnover = vals.Mdrop;
    double Alpha_star = vals.pl_star;
    double Alpha_esc = vals.pl_esc;
    double Fstar10 = vals.frac_star;
    double Fesc10 = vals.frac_esc;
    double Mlim_Fstar = vals.LimitMass_Fstar;
    double Mlim_Fesc = vals.LimitMass_Fesc;

    double Fstar,Fesc;

    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar10;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1./Fstar10;
    else
        Fstar = pow(M/1e10,Alpha_star);

    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc10;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc10;
    else
        Fesc = pow(M/1e10,Alpha_esc);

    return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}

/* Conditional Nion for minihaloes: either the fast sharp-cutoff table
   approximation (JBM) or direct GSL QAG integration of dNion_ConditionallnM_MINI
   between lnM limits M1 and M2. */
double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) {

    if (FAST_FCOLL_TABLES) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
return GaussLegendreQuad_Nion_MINI(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) MassTurnover_upper, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES);
    }
    else{ //standard old code
        double result, error, lower_limit, upper_limit;
        gsl_function F;
        double rel_tol = 0.01; //<- relative tolerance
        gsl_integration_workspace * w
        = gsl_integration_workspace_alloc (1000);

        struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
            .gf_obs = growthf,
            .Mval = M2,
            .sigma2 = sigma2,
            .delta1 = delta1,
            .delta2 = delta2,
            .Mdrop = MassTurnover,
            .Mdrop_upper = MassTurnover_upper,
            .pl_star = Alpha_star,
            .pl_esc = Alpha_esc,
            .frac_star = Fstar10,
            .frac_esc = Fesc10,
            .LimitMass_Fstar = Mlim_Fstar,
            .LimitMass_Fesc = Mlim_Fesc
        };
        int status;
        F.function = &dNion_ConditionallnM_MINI;
        F.params = &parameters_gsl_SFR_con;
        lower_limit = M1;   // integration limits are in lnM
        upper_limit = M2;

        gsl_set_error_handler_off();

        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: growthf=%e M2=%e sigma2=%e delta1=%e delta2=%e MassTurnover=%e",growthf,M2,sigma2,delta1,delta2,MassTurnover);
            LOG_ERROR("data: MassTurnover_upper=%e Alpha_star=%e Alpha_esc=%e Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",MassTurnover_upper,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        // NOTE(review): the delta2 > delta1 saturation check runs AFTER the
        // integration (result overwritten to 1), unlike the GL versions where
        // it short-circuits first — confirm this ordering is intentional.
        if(delta2 > delta1) {
            result = 1.;
            return result;
        }
        else {
            return result;
        }
    }
}

/* Conditional Nion for atomic-cooling haloes: fast table approximation or
   GSL QAG integration of dNion_ConditionallnM between lnM limits M1 and M2. */
double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double
Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) {

    if (FAST_FCOLL_TABLES) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
        return GaussLegendreQuad_Nion(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES);
    }
    else{ //standard
        double result, error, lower_limit, upper_limit;
        gsl_function F;
        double rel_tol = 0.01; //<- relative tolerance
        gsl_integration_workspace * w
        = gsl_integration_workspace_alloc (1000);

        struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
            .gf_obs = growthf,
            .Mval = M2,
            .sigma2 = sigma2,
            .delta1 = delta1,
            .delta2 = delta2,
            .Mdrop = MassTurnover,
            .pl_star = Alpha_star,
            .pl_esc = Alpha_esc,
            .frac_star = Fstar10,
            .frac_esc = Fesc10,
            .LimitMass_Fstar = Mlim_Fstar,
            .LimitMass_Fesc = Mlim_Fesc
        };

        F.function = &dNion_ConditionallnM;
        F.params = &parameters_gsl_SFR_con;
        lower_limit = M1;
        upper_limit = M2;

        int status;

        gsl_set_error_handler_off();

        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: growthf=%e M1=%e M2=%e sigma2=%e delta1=%e delta2=%e",growthf,M1,M2,sigma2,delta1,delta2);
            LOG_ERROR("data: MassTurnover=%e Alpha_star=%e Alpha_esc=%e Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        if(delta2 > delta1) {
            result = 1.;
            return result;
        }
        else {
            return result;
        }
    }
}

/* Gauss-Legendre integrand wrapper (struct by value) for the minihalo Nion;
   mirrors dNion_ConditionallnM_MINI but in single precision for the GL nodes. */
float Nion_ConditionallnM_GL_MINI(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){
    float M = exp(lnM);
    float growthf = parameters_gsl_SFR_con.gf_obs;
float M2 = parameters_gsl_SFR_con.Mval;
    float sigma2 = parameters_gsl_SFR_con.sigma2;
    float del1 = parameters_gsl_SFR_con.delta1;
    float del2 = parameters_gsl_SFR_con.delta2;
    float MassTurnover = parameters_gsl_SFR_con.Mdrop;
    float MassTurnover_upper = parameters_gsl_SFR_con.Mdrop_upper;
    float Alpha_star = parameters_gsl_SFR_con.pl_star;
    float Alpha_esc = parameters_gsl_SFR_con.pl_esc;
    float Fstar7_MINI = parameters_gsl_SFR_con.frac_star;
    float Fesc7_MINI = parameters_gsl_SFR_con.frac_esc;
    float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar;
    float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc;

    float Fstar,Fesc;

    // clamp the power-law fractions to unity beyond the limiting masses
    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar7_MINI;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1./Fstar7_MINI;
    else
        Fstar = pow(M/1e7,Alpha_star);

    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else
        Fesc = pow(M/1e7,Alpha_esc);

    return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}

/* Gauss-Legendre integrand wrapper for the atomic-halo Nion; single lower mass
   cutoff, normalisations anchored at 1e10 Msun. */
float Nion_ConditionallnM_GL(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){
    float M = exp(lnM);
    float growthf = parameters_gsl_SFR_con.gf_obs;
    float M2 = parameters_gsl_SFR_con.Mval;
    float sigma2 = parameters_gsl_SFR_con.sigma2;
    float del1 = parameters_gsl_SFR_con.delta1;
    float del2 = parameters_gsl_SFR_con.delta2;
    float MassTurnover = parameters_gsl_SFR_con.Mdrop;
    float Alpha_star = parameters_gsl_SFR_con.pl_star;
    float Alpha_esc = parameters_gsl_SFR_con.pl_esc;
    float Fstar10 = parameters_gsl_SFR_con.frac_star;
    float Fesc10 = parameters_gsl_SFR_con.frac_esc;
    float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar;
    float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc;

    float Fstar,Fesc;

    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar10;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1./Fstar10;
    else
        Fstar = pow(M/1e10,Alpha_star);

    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc10;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc10;
    else
        Fesc = pow(M/1e10,Alpha_esc);

    return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}

//JBM: Same as above but for minihaloes. Has two cutoffs, lower and upper.
float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES) {

    double result, nu_lower_limit, nu_higher_limit, nupivot;
    int i;

    double integrand, x;
    integrand = 0.;

    struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
        .gf_obs = growthf,
        .Mval = M2,
        .sigma2 = sigma2,
        .delta1 = delta1,
        .delta2 = delta2,
        .Mdrop = MassTurnover,
        .Mdrop_upper = MassTurnover_upper,
        .pl_star = Alpha_star,
        .pl_esc = Alpha_esc,
        .frac_star = Fstar7_MINI,
        .frac_esc = Fesc7_MINI,
        .LimitMass_Fstar = Mlim_Fstar_MINI,
        .LimitMass_Fesc = Mlim_Fesc_MINI
    };

    // region already collapsed: the collapsed fraction saturates at unity
    if(delta2 > delta1*0.9999) {
        result = 1.;
        return result;
    }

    if(FAST_FCOLL_TABLES){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
if(MassTurnover_upper <= MassTurnover){
            return 1e-40; //in sharp cut it's zero
        }

        double delta_arg = pow( (delta1 - delta2)/growthf , 2.);

        // nu = (delta/growth)^2 / (sigma(M)^2 - sigma2^2) at the lower cutoff mass
        double LogMass=log(MassTurnover);
        int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        double MassBinLow = MinMass + mass_bin_width*(double)MassBin;
        double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        nu_lower_limit = delta_arg/(sigmaM1 * sigmaM1 - sigma2 * sigma2);

        // same at the upper cutoff mass
        LogMass = log(MassTurnover_upper);
        MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(double)MassBin;
        double sigmaM2 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        nu_higher_limit = delta_arg/(sigmaM2*sigmaM2-sigma2*sigma2);

        //note we keep nupivot1 just in case very negative delta makes it reach that nu
        LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster
        int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
        double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
        double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose.

        LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster
        MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
        double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
        double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2);

        double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M)
        double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot1>nu>nupivot2 (small M)
        double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M)
        //beta2 fixed by continuity.

        // // 3PLs
        double fcollres=0.0;
        double fcollres_high=0.0; //for the higher threshold to subtract

        // re-written for further speedups
        if (nu_higher_limit <= nupivot2){ //if both are below pivot2 don't bother adding and subtracting the high contribution
            fcollres=(Fcollapprox(nu_lower_limit,beta3))*pow(nupivot2,-beta3);
            fcollres_high=(Fcollapprox(nu_higher_limit,beta3))*pow(nupivot2,-beta3);
        }
        else {
            fcollres_high=(Fcollapprox(nu_higher_limit,beta2))*pow(nupivot1,-beta2);
            if (nu_lower_limit > nupivot2){ // lower limit still in the middle power law
                fcollres=(Fcollapprox(nu_lower_limit,beta2))*pow(nupivot1,-beta2);
            }
            else { // lower limit in the smallest-M regime: stitch the two power laws
                fcollres=(Fcollapprox(nupivot2,beta2))*pow(nupivot1,-beta2);
                fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3);
            }
        }
        if (fcollres < fcollres_high){
            return 1e-40;
        }
        return (fcollres-fcollres_high);
    }
    else{
        // direct Gauss-Legendre sum over precomputed nodes/weights;
        // Type selects the X-ray (1) vs ionisation (0) node set
        for(i=1; i<(n+1); i++){
            if(Type==1) {
                x = xi_SFR_Xray[i];
                integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con);
            }
            if(Type==0) {
                x = xi_SFR[i];
                integrand += wi_SFR[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con);
            }
        }
        return integrand;
    }
}

//JBM: Added the approximation if user_params->FAST_FCOLL_TABLES==True
float
GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES) {
    //Performs the Gauss-Legendre quadrature.
    int i;

    double result, nu_lower_limit, nupivot;

    // region already collapsed: collapsed fraction saturates at unity
    if(delta2 > delta1*0.9999) {
        result = 1.;
        return result;
    }

    double integrand, x;
    integrand = 0.;

    struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
        .gf_obs = growthf,
        .Mval = M2,
        .sigma2 = sigma2,
        .delta1 = delta1,
        .delta2 = delta2,
        .Mdrop = MassTurnover,
        .pl_star = Alpha_star,
        .pl_esc = Alpha_esc,
        .frac_star = Fstar10,
        .frac_esc = Fesc10,
        .LimitMass_Fstar = Mlim_Fstar,
        .LimitMass_Fesc = Mlim_Fesc
    };

    if (FAST_FCOLL_TABLES && global_params.USE_FAST_ATOMIC){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.

        double delta_arg = pow( (delta1 - delta2)/growthf , 2.0);

        // nu at the turnover (lower integration) mass
        double LogMass=log(MassTurnover);
        int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        double MassBinLow = MinMass + mass_bin_width*(double)MassBin;
        double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        nu_lower_limit = delta_arg/(sigmaM1*sigmaM1-sigma2*sigma2);

        LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster
        int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
        double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
        double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose.

        LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster
        MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
        double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
        double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2);

        double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M)
        double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot2<nu<nupivot1 (small M)
        double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M)
        //beta2 fixed by continuity.

        double nucrit_sigma2 = delta_arg*pow(sigma2+1e-10,-2.0); //above this nu sigma2>sigma1, so HMF=0. eps added to avoid infinities

        // // 3PLs
        double fcollres=0.0;
        if(nu_lower_limit >= nucrit_sigma2){ //fully in the flat part of sigma(nu), M^alpha is nu-independent.
            return 1e-40;
        }
        else{ //we subtract the contribution from high nu, since the HMF is set to 0 if sigma2>sigma1
            fcollres -= Fcollapprox(nucrit_sigma2,beta1)*pow(nupivot1,-beta1);
        }

        if(nu_lower_limit >= nupivot1){ // entirely in the large-M power law
            fcollres+=Fcollapprox(nu_lower_limit,beta1)*pow(nupivot1,-beta1);
        }
        else{
            fcollres+=Fcollapprox(nupivot1,beta1)*pow(nupivot1,-beta1);
            if (nu_lower_limit > nupivot2){
                fcollres+=(Fcollapprox(nu_lower_limit,beta2)-Fcollapprox(nupivot1,beta2))*pow(nupivot1,-beta2);
            }
            else {
                fcollres+=(Fcollapprox(nupivot2,beta2)-Fcollapprox(nupivot1,beta2) )*pow(nupivot1,-beta2);
                fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3);
            }
        }
        if (fcollres<=0.0){
            LOG_DEBUG("Negative fcoll? fc=%.1le Mt=%.1le \n",fcollres, MassTurnover);
            fcollres=1e-40;
        }
        return fcollres;
    }
    else{
        // direct Gauss-Legendre sum; Type selects X-ray (1) vs ionisation (0) nodes
        for(i=1; i<(n+1); i++){
            if(Type==1) {
                x = xi_SFR_Xray[i];
                integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con);
            }
            if(Type==0) {
                x = xi_SFR[i];
                integrand += wi_SFR[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con);
            }
        }
        return integrand;
    }
}

// NOTE(review): mid-file #include; conventionally this belongs at the top of
// the file, but it is kept here to remain byte-compatible with callers/build.
#include <gsl/gsl_sf_gamma.h>
//JBM: Integral of a power-law times exponential for EPS: \int dnu nu^beta * exp(-nu/2)/sqrt(nu) from numin to infty.
double Fcollapprox (double numin, double beta){
    //nu is deltacrit^2/sigma^2, corrected by delta(R) and sigma(R)
    double gg = gsl_sf_gamma_inc(0.5+beta,0.5*numin);
    return gg*pow(2,0.5+beta)*pow(2.0*PI,-0.5);
}

/* Build the low-density (log10 Nion vs log10(1+delta)) and high-density
   (Nion vs delta) interpolation tables used for the conditional ionising
   emissivity, parallelised over table entries with OpenMP. */
void initialise_Nion_General_spline(float z, float min_density, float max_density, float Mmax, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES){

    float Mmin = MassTurnover/50.;
    double overdense_val, growthf, sigma2;
    double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
    double overdense_small_high, overdense_small_low;
    int i;
    float ln_10;

    if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
        overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
    }
    else {
        overdense_small_high = max_density;
    }
    overdense_small_low = min_density;

    ln_10 = log(10);

    float MassBinLow;
    int MassBin;

    growthf = dicke(z);

    Mmin = log(Mmin);
    Mmax = log(Mmax);

    // sigma at the maximum (conditioning) mass from the interpolation table
    MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
    MassBinLow = MinMass + mass_bin_width*(float)MassBin;
    sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;

#pragma omp parallel shared(log10_overdense_spline_SFR,log10_Nion_spline,overdense_small_low,overdense_small_high,growthf,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i,overdense_val) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for (i=0; i<NSFR_low; i++){
            overdense_val = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));

            log10_overdense_spline_SFR[i] = overdense_val;
            log10_Nion_spline[i] = GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,pow(10.,overdense_val)-1.,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
            if(fabs(log10_Nion_spline[i]) < 1e-38) {
                log10_Nion_spline[i] = 1e-38;
            }
            log10_Nion_spline[i] = log10(log10_Nion_spline[i]);
            if(log10_Nion_spline[i] < -40.){
                log10_Nion_spline[i] = -40.;
            }
            // store as ln rather than log10 (multiplied by ln 10)
            log10_Nion_spline[i] *= ln_10;
        }
    }

    for (i=0; i<NSFR_low; i++){
        if(!isfinite(log10_Nion_spline[i])) {
            LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
//            Throw(ParameterError);
            Throw(TableGenerationError);
        }
    }

#pragma omp parallel shared(Overdense_spline_SFR,Nion_spline,overdense_large_low,overdense_large_high,growthf,Mmin,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for(i=0;i<NSFR_high;i++) {
            Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
            Nion_spline[i] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
            if(Nion_spline[i]<0.) {
                Nion_spline[i]=pow(10.,-40.0);
            }
        }
    }

    for(i=0;i<NSFR_high;i++) {
        if(!isfinite(Nion_spline[i])) {
            LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
//            Throw(ParameterError);
            Throw(TableGenerationError);
        }
    }
}

/* As above but with an extra Mturn dimension for both atomic and minihalo
   populations (NSFR_low x NMTURN and NSFR_high x NMTURN tables). */
void initialise_Nion_General_spline_MINI(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){

    double growthf, sigma2;
    double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
    double overdense_small_high, overdense_small_low;
    int i,j;
    float ln_10;

    if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
        overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
    }
    else {
        overdense_small_high = max_density;
    }
    overdense_small_low = min_density;

    ln_10 = log(10);

    float MassBinLow;
    int MassBin;

    growthf = dicke(z);

    Mmin = log(Mmin);
    Mmax = log(Mmax);

    MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
    MassBinLow = MinMass + mass_bin_width*(float)MassBin;
    sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;

    for (i=0; i<NSFR_low; i++){
        log10_overdense_spline_SFR[i] = log10(1.
+ overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); } for (i=0;i<NSFR_high;i++) { Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); } for (i=0;i<NMTURN;i++){ Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min)); Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI)); } #pragma omp parallel shared(log10_Nion_spline,growthf,Mmax,sigma2,log10_overdense_spline_SFR,Mturns,Mturns_MINI,\ Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,ln_10,log10_Nion_spline_MINI,Mcrit_atom,\ Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \ private(i,j) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ for (j=0; j<NMTURN; j++){ log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\ pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns[j],Alpha_star,\ Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES)); if(log10_Nion_spline[i+j*NSFR_low] < -40.){ log10_Nion_spline[i+j*NSFR_low] = -40.; } log10_Nion_spline[i+j*NSFR_low] *= ln_10; log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\ pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,\ Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES)); if(log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){ log10_Nion_spline_MINI[i+j*NSFR_low] = -40.; } log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10; } } } for (i=0; i<NSFR_low; i++){ for (j=0; j<NMTURN; j++){ if(isfinite(log10_Nion_spline[i+j*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline"); // Throw(ParameterError); Throw(TableGenerationError); } 
if(isfinite(log10_Nion_spline_MINI[i+j*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline_MINI"); // Throw(ParameterError); Throw(TableGenerationError); } } } #pragma omp parallel shared(Nion_spline,growthf,Mmin,Mmax,sigma2,Overdense_spline_SFR,Mturns,Alpha_star,Alpha_star_mini,\ Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\ Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \ private(i,j) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { for (j=0; j<NMTURN; j++){ Nion_spline[i+j*NSFR_high] = Nion_ConditionalM( growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i], Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES ); if(Nion_spline[i+j*NSFR_high]<0.) { Nion_spline[i+j*NSFR_high]=pow(10.,-40.0); } Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI( growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i], Mturns_MINI[j],Mcrit_atom,Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI, Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES ); if(Nion_spline_MINI[i+j*NSFR_high]<0.) 
{ Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0); } } } } for(i=0;i<NSFR_high;i++) { for (j=0; j<NMTURN; j++){ if(isfinite(Nion_spline[i+j*NSFR_high])==0) { LOG_ERROR("Detected either an infinite or NaN value in Nion_spline"); // Throw(ParameterError); Throw(TableGenerationError); } if(isfinite(Nion_spline_MINI[i+j*NSFR_high])==0) { LOG_ERROR("Detected either an infinite or NaN value in Nion_spline_MINI"); // Throw(ParameterError); Throw(TableGenerationError); } } } } void initialise_Nion_General_spline_MINI_prev(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){ double growthf, sigma2; double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999; double overdense_small_high, overdense_small_low; int i,j; float ln_10; if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) { overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001; } else { overdense_small_high = max_density; } overdense_small_low = min_density; ln_10 = log(10); float MassBinLow; int MassBin; growthf = dicke(z); Mmin = log(Mmin); Mmax = log(Mmax); MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; for (i=0; i<NSFR_low; i++){ prev_log10_overdense_spline_SFR[i] = log10(1. 
+ overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); } for (i=0;i<NSFR_high;i++) { prev_Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); } for (i=0;i<NMTURN;i++){ Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min)); Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI)); } #pragma omp parallel shared(prev_log10_Nion_spline,growthf,Mmax,sigma2,prev_log10_overdense_spline_SFR,Mturns,Alpha_star,Alpha_star_mini,\ Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_log10_Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\ Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \ private(i,j) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ for (j=0; j<NMTURN; j++){ prev_log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\ pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns[j],\ Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES)); if(prev_log10_Nion_spline[i+j*NSFR_low] < -40.){ prev_log10_Nion_spline[i+j*NSFR_low] = -40.; } prev_log10_Nion_spline[i+j*NSFR_low] *= ln_10; prev_log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\ pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,\ Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES)); if(prev_log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){ prev_log10_Nion_spline_MINI[i+j*NSFR_low] = -40.; } prev_log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10; } } } for (i=0; i<NSFR_low; i++){ for (j=0; j<NMTURN; j++){ if(isfinite(prev_log10_Nion_spline[i+j*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline"); // 
Throw(ParameterError); Throw(TableGenerationError); } if(isfinite(prev_log10_Nion_spline_MINI[i+j*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline_MINI"); // Throw(ParameterError); Throw(TableGenerationError); } } } #pragma omp parallel shared(prev_Nion_spline,growthf,Mmin,Mmax,sigma2,prev_Overdense_spline_SFR,Mturns,\ Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_Nion_spline_MINI,Mturns_MINI,\ Mcrit_atom,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \ private(i,j) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { for (j=0; j<NMTURN; j++){ prev_Nion_spline[i+j*NSFR_high] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,prev_Overdense_spline_SFR[i],\ Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES); if(prev_Nion_spline[i+j*NSFR_high]<0.) { prev_Nion_spline[i+j*NSFR_high]=pow(10.,-40.0); } prev_Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI(growthf,Mmin,Mmax,sigma2,Deltac,\ prev_Overdense_spline_SFR[i],Mturns_MINI[j],Mcrit_atom,Alpha_star_mini,\ Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES); if(prev_Nion_spline_MINI[i+j*NSFR_high]<0.) 
                {
                    prev_Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0);
                }
            }
        }
    }

    // Sanity check: every entry of the high-density tables built above must be finite.
    for(i=0;i<NSFR_high;i++) {
        for (j=0; j<NMTURN; j++){
            if(isfinite(prev_Nion_spline[i+j*NSFR_high])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }

            if(isfinite(prev_Nion_spline_MINI[i+j*NSFR_high])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline_MINI");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
        }
    }
}

// Tabulate the globally averaged ionising emissivity Nion_General(z) on a regular
// redshift grid of Nbin points spanning [zmin, zmax], for a single turnover mass
// MassTurn. Results are cached in the file-scope arrays z_val / Nion_z_val, which
// are allocated on first call and reused on subsequent calls.
// Raises TableGenerationError if any tabulated value is non-finite.
void initialise_Nion_Ts_spline(
    int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Alpha_esc,
    float Fstar10, float Fesc10
){
    int i;
    // Integration range: a fixed fraction (1/50) of the turnover mass up to the
    // global maximum integration mass.
    float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL;
    float Mlim_Fstar, Mlim_Fesc;

    // Lazily allocate the cache arrays; they persist between calls.
    if (z_val == NULL){
        z_val = calloc(Nbin,sizeof(double));
        Nion_z_val = calloc(Nbin,sizeof(double));
    }

    // Mass limits returned by the bisection for the (Alpha, F) power laws —
    // presumably the masses at which the scalings saturate (see Mass_limit_bisection).
    Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
    Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10);

    // Each redshift sample is independent, so fill the table in parallel.
#pragma omp parallel shared(z_val,Nion_z_val,zmin,zmax, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for (i=0; i<Nbin; i++){
            z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
            Nion_z_val[i] = Nion_General(z_val[i], Mmin, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc);
        }
    }

    // Validate the table (done outside the parallel region).
    for (i=0; i<Nbin; i++){
        if(isfinite(Nion_z_val[i])==0) {
            LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val");
//            Throw(ParameterError);
            Throw(TableGenerationError);
        }
    }
}

// As initialise_Nion_Ts_spline, but also tabulates the minihalo ("MINI")
// contribution over a grid of NMTURN turnover masses into Nion_z_val_MINI
// (laid out as [i + j*Nbin] for redshift index i and turnover index j).
void initialise_Nion_Ts_spline_MINI(
    int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_star_mini, float Alpha_esc,
    float Fstar10, float Fesc10, float Fstar7_MINI, float Fesc7_MINI
){
    int i,j;
    float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL;
    float Mlim_Fstar, Mlim_Fesc, Mlim_Fstar_MINI, Mlim_Fesc_MINI, Mcrit_atom_val;
    if (z_val == NULL){
        z_val = calloc(Nbin,sizeof(double));
Nion_z_val = calloc(Nbin,sizeof(double)); Nion_z_val_MINI = calloc(Nbin*NMTURN,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10); Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini)); Mlim_Fesc_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc7_MINI * pow(1e3, Alpha_esc)); float MassTurnover[NMTURN]; for (i=0;i<NMTURN;i++){ MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN)); } #pragma omp parallel shared(z_val,Nion_z_val,Nbin,zmin,zmax,Mmin,Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,\ Nion_z_val_MINI,MassTurnover,Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI) \ private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); Mcrit_atom_val = atomic_cooling_threshold(z_val[i]); Nion_z_val[i] = Nion_General(z_val[i], Mmin, Mcrit_atom_val, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc); for (j=0; j<NMTURN; j++){ Nion_z_val_MINI[i+j*Nbin] = Nion_General_MINI(z_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star_mini, Alpha_esc, Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI); } } } for (i=0; i<Nbin; i++){ if(isfinite(Nion_z_val[i])==0) { i = Nbin; LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val"); // Throw(ParameterError); Throw(TableGenerationError); } for (j=0; j<NMTURN; j++){ if(isfinite(Nion_z_val_MINI[i+j*Nbin])==0){ j = NMTURN; LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val_MINI"); // Throw(ParameterError); Throw(TableGenerationError); } } } } void initialise_SFRD_spline(int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Fstar10){ int i; float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL; float 
Mlim_Fstar; if (z_X_val == NULL){ z_X_val = calloc(Nbin,sizeof(double)); SFRD_val = calloc(Nbin,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); #pragma omp parallel shared(z_X_val,SFRD_val,zmin,zmax, MassTurn, Alpha_star, Fstar10, Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); SFRD_val[i] = Nion_General(z_X_val[i], Mmin, MassTurn, Alpha_star, 0., Fstar10, 1.,Mlim_Fstar,0.); } } for (i=0; i<Nbin; i++){ if(isfinite(SFRD_val[i])==0) { LOG_ERROR("Detected either an infinite or NaN value in SFRD_val"); // Throw(ParameterError); Throw(TableGenerationError); } } } void initialise_SFRD_spline_MINI(int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_star_mini, float Fstar10, float Fstar7_MINI){ int i,j; float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL; float Mlim_Fstar, Mlim_Fstar_MINI, Mcrit_atom_val; if (z_X_val == NULL){ z_X_val = calloc(Nbin,sizeof(double)); SFRD_val = calloc(Nbin,sizeof(double)); SFRD_val_MINI = calloc(Nbin*NMTURN,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini)); float MassTurnover[NMTURN]; for (i=0;i<NMTURN;i++){ MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN)); } #pragma omp parallel shared(z_X_val,zmin,zmax,Nbin,SFRD_val,Mmin, Alpha_star,Alpha_star_mini,Fstar10,Mlim_Fstar,\ SFRD_val_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \ private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); Mcrit_atom_val = atomic_cooling_threshold(z_X_val[i]); SFRD_val[i] = Nion_General(z_X_val[i], Mmin, Mcrit_atom_val, Alpha_star, 0., Fstar10, 
1.,Mlim_Fstar,0.); for (j=0; j<NMTURN; j++){ SFRD_val_MINI[i+j*Nbin] = Nion_General_MINI(z_X_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star_mini, 0., Fstar7_MINI, 1.,Mlim_Fstar_MINI,0.); } } } for (i=0; i<Nbin; i++){ if(isfinite(SFRD_val[i])==0) { i = Nbin; LOG_ERROR("Detected either an infinite or NaN value in SFRD_val"); // Throw(ParameterError); Throw(TableGenerationError); } for (j=0; j<NMTURN; j++){ if(isfinite(SFRD_val_MINI[i+j*Nbin])==0) { j = NMTURN; LOG_ERROR("Detected either an infinite or NaN value in SFRD_val_MINI"); // Throw(ParameterError); Throw(TableGenerationError); } } } } void initialise_SFRD_Conditional_table( int Nfilter, float min_density[], float max_density[], float growthf[], float R[], float MassTurnover, float Alpha_star, float Fstar10, bool FAST_FCOLL_TABLES ){ double overdense_val; double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION; double overdense_small_high, overdense_small_low; float Mmin,Mmax,Mlim_Fstar,sigma2; int i,j,k,i_tot; float ln_10; ln_10 = log(10); Mmin = MassTurnover/50.; Mmax = RtoM(R[Nfilter-1]); Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mmin = log(Mmin); for (i=0; i<NSFR_high;i++) { overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); } float MassBinLow; int MassBin; for (j=0; j < Nfilter; j++) { Mmax = RtoM(R[j]); initialiseGL_Nion_Xray(NGL_SFR, MassTurnover/50., Mmax); Mmax = log(Mmax); MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; if(min_density[j]*growthf[j] < -1.) { overdense_small_low = -1. 
+ global_params.MIN_DENSITY_LOW_LIMIT; } else { overdense_small_low = min_density[j]*growthf[j]; } overdense_small_high = max_density[j]*growthf[j]; if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) { overdense_small_high = global_params.CRIT_DENS_TRANSITION; } for (i=0; i<NSFR_low; i++) { overdense_val = log10(1. + overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); overdense_low_table[i] = pow(10.,overdense_val); } #pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ log10_SFRD_z_low_table[j][i] = GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES); if(fabs(log10_SFRD_z_low_table[j][i]) < 1e-38) { log10_SFRD_z_low_table[j][i] = 1e-38; } log10_SFRD_z_low_table[j][i] = log10(log10_SFRD_z_low_table[j][i]); log10_SFRD_z_low_table[j][i] += 10.0; log10_SFRD_z_low_table[j][i] *= ln_10; } } for (i=0; i<NSFR_low; i++){ if(isfinite(log10_SFRD_z_low_table[j][i])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table"); // Throw(ParameterError); Throw(TableGenerationError); } } #pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES); SFRD_z_high_table[j][i] *= pow(10., 10.0); } } for(i=0;i<NSFR_high;i++) { if(isfinite(SFRD_z_high_table[j][i])==0) { LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table"); // 
Throw(ParameterError); Throw(TableGenerationError); } } } } void initialise_SFRD_Conditional_table_MINI( int Nfilter, float min_density[], float max_density[], float growthf[], float R[], float Mcrit_atom[], float Alpha_star, float Alpha_star_mini, float Fstar10, float Fstar7_MINI, bool FAST_FCOLL_TABLES ){ double overdense_val; double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION; double overdense_small_high, overdense_small_low; float Mmin,Mmax,Mlim_Fstar,sigma2,Mlim_Fstar_MINI; int i,j,k,i_tot; float ln_10; ln_10 = log(10); Mmin = global_params.M_MIN_INTEGRAL; Mmax = RtoM(R[Nfilter-1]); Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini)); float MassTurnover[NMTURN]; for (i=0;i<NMTURN;i++){ MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN)); } Mmin = log(Mmin); for (i=0; i<NSFR_high;i++) { overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); } float MassBinLow; int MassBin; for (j=0; j < Nfilter; j++) { Mmax = RtoM(R[j]); initialiseGL_Nion_Xray(NGL_SFR, global_params.M_MIN_INTEGRAL, Mmax); Mmax = log(Mmax); MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; if(min_density[j]*growthf[j] < -1.) { overdense_small_low = -1. + global_params.MIN_DENSITY_LOW_LIMIT; } else { overdense_small_low = min_density[j]*growthf[j]; } overdense_small_high = max_density[j]*growthf[j]; if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) { overdense_small_high = global_params.CRIT_DENS_TRANSITION; } for (i=0; i<NSFR_low; i++) { overdense_val = log10(1. 
+ overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); overdense_low_table[i] = pow(10.,overdense_val); } #pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,Mcrit_atom,Alpha_star,Alpha_star_mini,Fstar10,Mlim_Fstar,\ log10_SFRD_z_low_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI,ln_10) \ private(i,k) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ log10_SFRD_z_low_table[j][i] = log10(GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES)); if(log10_SFRD_z_low_table[j][i] < -50.){ log10_SFRD_z_low_table[j][i] = -50.; } log10_SFRD_z_low_table[j][i] += 10.0; log10_SFRD_z_low_table[j][i] *= ln_10; for (k=0; k<NMTURN; k++){ log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover[k], Mcrit_atom[j],Alpha_star_mini,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES)); if(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] < -50.){ log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = -50.; } log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] += 10.0; log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] *= ln_10; } } } for (i=0; i<NSFR_low; i++){ if(isfinite(log10_SFRD_z_low_table[j][i])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table"); // Throw(ParameterError); Throw(TableGenerationError); } for (k=0; k<NMTURN; k++){ if(isfinite(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table_MINI"); // Throw(ParameterError); Throw(TableGenerationError); } } } #pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,Mcrit_atom,Alpha_star,Alpha_star_mini,Fstar10,\ 
Mlim_Fstar,SFRD_z_high_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \ private(i,k) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],\ Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES); if (SFRD_z_high_table[j][i] < 1e-50){ SFRD_z_high_table[j][i] = 1e-50; } SFRD_z_high_table[j][i] *= pow(10., 10.0); for (k=0; k<NMTURN; k++){ SFRD_z_high_table_MINI[j][i+k*NSFR_high] = Nion_ConditionalM_MINI(growthf[j],Mmin,Mmax,sigma2,Deltac,\ overdense_high_table[i],MassTurnover[k],Mcrit_atom[j],\ Alpha_star_mini,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES); if (SFRD_z_high_table_MINI[j][i+k*NSFR_high] < 1e-50){ SFRD_z_high_table_MINI[j][i+k*NSFR_high] = 1e-50; } } } } for(i=0;i<NSFR_high;i++) { if(isfinite(SFRD_z_high_table[j][i])==0) { LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table"); // Throw(ParameterError); Throw(TableGenerationError); } for (k=0; k<NMTURN; k++){ if(isfinite(SFRD_z_high_table_MINI[j][i+k*NSFR_high])==0) { LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table_MINI"); // Throw(ParameterError); Throw(TableGenerationError); } } } } } // The volume filling factor at a given redshift, Q(z), or find redshift at a given Q, z(Q). // // The evolution of Q can be written as // dQ/dt = n_{ion}/dt - Q/t_{rec}, // where n_{ion} is the number of ionizing photons per baryon. The averaged recombination time is given by // t_{rec} ~ 0.93 Gyr * (C_{HII}/3)^-1 * (T_0/2e4 K)^0.7 * ((1+z)/7)^-3. // We assume the clumping factor of C_{HII}=3 and the IGM temperature of T_0 = 2e4 K, following // Section 2.1 of Kuhlen & Faucher-Gigue`re (2012) MNRAS, 423, 862 and references therein. // 1) initialise interpolation table // -> initialise_Q_value_spline(NoRec, M_TURN, ALPHA_STAR, ALPHA_ESC, F_STAR10, F_ESC10) // NoRec = 0: Compute dQ/dt with the recombination time. 
// NoRec = 1: Ignore recombination. // 2) find Q value at a given z -> Q_at_z(z, &(Q)) // or find z at a given Q -> z_at_Q(Q, &(z)). // 3) free memory allocation -> free_Q_value() // Set up interpolation table for the volume filling factor, Q, at a given redshift z and redshift at a given Q. int InitialisePhotonCons(struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options) { /* This is an API-level function for initialising the photon conservation. */ int status; Try{ // this try wraps the whole function. Broadcast_struct_global_PS(user_params,cosmo_params); Broadcast_struct_global_UF(user_params,cosmo_params); init_ps(); // To solve differentail equation, uses Euler's method. // NOTE: // (1) With the fiducial parameter set, // when the Q value is < 0.9, the difference is less than 5% compared with accurate calculation. // When Q ~ 0.98, the difference is ~25%. To increase accuracy one can reduce the step size 'da', but it will increase computing time. // (2) With the fiducial parameter set, // the difference for the redshift where the reionization end (Q = 1) is ~0.2 % compared with accurate calculation. float ION_EFF_FACTOR,M_MIN,M_MIN_z0,M_MIN_z1,Mlim_Fstar, Mlim_Fesc; double a_start = 0.03, a_end = 1./(1. + global_params.PhotonConsEndCalibz); // Scale factors of 0.03 and 0.17 correspond to redshifts of ~32 and ~5.0, respectively. double C_HII = 3., T_0 = 2e4; double reduce_ratio = 1.003; double Q0,Q1,Nion0,Nion1,Trec,da,a,z0,z1,zi,dadt,ans,delta_a,zi_prev,Q1_prev; double *z_arr,*Q_arr; int Nmax = 2000; // This is the number of step, enough with 'da = 2e-3'. If 'da' is reduced, this number should be checked. 
int cnt, nbin, i, istart; int fail_condition, not_mono_increasing, num_fails; int gsl_status; z_arr = calloc(Nmax,sizeof(double)); Q_arr = calloc(Nmax,sizeof(double)); //set the minimum source mass if (flag_options->USE_MASS_DEPENDENT_ZETA) { ION_EFF_FACTOR = global_params.Pop2_ion * astro_params->F_STAR10 * astro_params->F_ESC10; M_MIN = astro_params->M_TURN/50.; Mlim_Fstar = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_STAR, astro_params->F_STAR10); Mlim_Fesc = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_ESC, astro_params->F_ESC10); if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN),1e20); } else{ initialiseSigmaMInterpTable(M_MIN,1e20); } } else { ION_EFF_FACTOR = astro_params->HII_EFF_FACTOR; } fail_condition = 1; num_fails = 0; // We are going to come up with the analytic curve for the photon non conservation correction // This can be somewhat numerically unstable and as such we increase the sampling until it works // If it fails to produce a monotonically increasing curve (for Q as a function of z) after 10 attempts we crash out while(fail_condition!=0) { a = a_start; if(num_fails < 3) { da = 3e-3 - ((double)num_fails)*(1e-3); } else { da = 1e-3 - ((double)num_fails - 2.)*(1e-4); } delta_a = 1e-7; zi_prev = Q1_prev = 0.; not_mono_increasing = 0; if(num_fails>0) { for(i=0;i<Nmax;i++) { z_arr[i] = 0.; Q_arr[i] = 0.; } } cnt = 0; Q0 = 0.; while (a < a_end) { zi = 1./a - 1.; z0 = 1./(a+delta_a) - 1.; z1 = 1./(a-delta_a) - 1.; // Ionizing emissivity (num of photons per baryon) if (flag_options->USE_MASS_DEPENDENT_ZETA) { Nion0 = ION_EFF_FACTOR*Nion_General(z0, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR, astro_params->ALPHA_ESC, astro_params->F_STAR10, astro_params->F_ESC10, Mlim_Fstar, Mlim_Fesc); Nion1 = ION_EFF_FACTOR*Nion_General(z1, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR, astro_params->ALPHA_ESC, 
astro_params->F_STAR10, astro_params->F_ESC10, Mlim_Fstar, Mlim_Fesc); } else { //set the minimum source mass if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 1.22); M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 1.22); } else { // ionized IGM M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 0.6); M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 0.6); } if(M_MIN_z0 < M_MIN_z1) { if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z0),1e20); } else{ initialiseSigmaMInterpTable(M_MIN_z0,1e20); } } else { if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z1),1e20); } else{ initialiseSigmaMInterpTable(M_MIN_z1,1e20); } } Nion0 = ION_EFF_FACTOR*FgtrM_General(z0,M_MIN_z0); Nion1 = ION_EFF_FACTOR*FgtrM_General(z1,M_MIN_z1); freeSigmaMInterpTable(); } // With scale factor a, the above equation is written as dQ/da = n_{ion}/da - Q/t_{rec}*(dt/da) if (!global_params.RecombPhotonCons) { Q1 = Q0 + ((Nion0-Nion1)/2/delta_a)*da; // No Recombination } else { dadt = Ho*sqrt(cosmo_params_ps->OMm/a + global_params.OMr/a/a + cosmo_params_ps->OMl*a*a); // da/dt = Ho*a*sqrt(OMm/a^3 + OMr/a^4 + OMl) Trec = 0.93 * 1e9 * SperYR * pow(C_HII/3.,-1) * pow(T_0/2e4,0.7) * pow((1.+zi)/7.,-3); Q1 = Q0 + ((Nion0-Nion1)/2./delta_a - Q0/Trec/dadt)*da; } // Curve is no longer monotonically increasing, we are going to have to exit and start again if(Q1 < Q1_prev) { not_mono_increasing = 1; break; } zi_prev = zi; Q1_prev = Q1; z_arr[cnt] = zi; Q_arr[cnt] = Q1; cnt = cnt + 1; if (Q1 >= 1.0) break; // if fully ionized, stop here. // As the Q value increases, the bin size decreases gradually because more accurate calculation is required. if (da < 7e-5) da = 7e-5; // set minimum bin size. 
else da = pow(da,reduce_ratio); Q0 = Q1; a = a + da; } // A check to see if we ended up with a monotonically increasing function if(not_mono_increasing==0) { fail_condition = 0; } else { num_fails += 1; if(num_fails>10) { LOG_ERROR("Failed too many times."); // Throw ParameterError; Throw(PhotonConsError); } } } cnt = cnt - 1; istart = 0; for (i=1;i<cnt;i++){ if (Q_arr[i-1] == 0. && Q_arr[i] != 0.) istart = i-1; } nbin = cnt - istart; N_analytic = nbin; // initialise interploation Q as a function of z z_Q = calloc(nbin,sizeof(double)); Q_value = calloc(nbin,sizeof(double)); Q_at_z_spline_acc = gsl_interp_accel_alloc (); Q_at_z_spline = gsl_spline_alloc (gsl_interp_cspline, nbin); for (i=0; i<nbin; i++){ z_Q[i] = z_arr[cnt-i]; Q_value[i] = Q_arr[cnt-i]; } gsl_set_error_handler_off(); gsl_status = gsl_spline_init(Q_at_z_spline, z_Q, Q_value, nbin); GSL_ERROR(gsl_status); Zmin = z_Q[0]; Zmax = z_Q[nbin-1]; Qmin = Q_value[nbin-1]; Qmax = Q_value[0]; // initialise interpolation z as a function of Q double *Q_z = calloc(nbin,sizeof(double)); double *z_value = calloc(nbin,sizeof(double)); z_at_Q_spline_acc = gsl_interp_accel_alloc (); z_at_Q_spline = gsl_spline_alloc (gsl_interp_linear, nbin); for (i=0; i<nbin; i++){ Q_z[i] = Q_value[nbin-1-i]; z_value[i] = z_Q[nbin-1-i]; } gsl_status = gsl_spline_init(z_at_Q_spline, Q_z, z_value, nbin); GSL_ERROR(gsl_status); free(z_arr); free(Q_arr); if (flag_options->USE_MASS_DEPENDENT_ZETA) { freeSigmaMInterpTable; } LOG_DEBUG("Initialised PhotonCons."); } // End of try Catch(status){ return status; } return(0); } // Function to construct the spline for the calibration curve of the photon non-conservation int PhotonCons_Calibration(double *z_estimate, double *xH_estimate, int NSpline){ int status; Try{ if(xH_estimate[NSpline-1] > 0.0 && xH_estimate[NSpline-2] > 0.0 && xH_estimate[NSpline-3] > 0.0 && xH_estimate[0] <= global_params.PhotonConsStart) { initialise_NFHistory_spline(z_estimate,xH_estimate,NSpline); } } Catch(status){ return 
        status;
    }
    return(0);
}

// Function callable from Python to know at which redshift to start sampling the calibration curve (to minimise function calls)
int ComputeZstart_PhotonCons(double *zstart) {

    int status;
    double temp;

    Try{
        if((1.-global_params.PhotonConsStart) > Qmax) {
            // It is possible that reionisation never even starts
            // Just need to arbitrarily set a high redshift to perform the algorithm
            temp = 20.;
        }
        else {
            z_at_Q(1. - global_params.PhotonConsStart,&(temp));
            // Increase the inferred redshift by 10 per cent (factor 1.1) so the
            // sampling safely brackets the start of reionisation when the spline
            // estimate isn't high enough.
            temp *= 1.1;
        }
    }
    Catch(status){
        return(status); // Use the status to determine if something went wrong.
    }

    *zstart = temp;
    return(0);
}

// Build the delta-z photon non-conservation correction as a function of neutral
// fraction, by differencing the calibrated and analytic reionisation histories.
// (Definition continues beyond this chunk.)
void determine_deltaz_for_photoncons() {

    int i, j, increasing_val, counter, smoothing_int;
    double temp;
    float z_cal, z_analytic, NF_sample, returned_value, NF_sample_min, gradient_analytic, z_analytic_at_endpoint, const_offset, z_analytic_2, smoothing_width;
    float bin_width, delta_NF, val1, val2, extrapolated_value;

    LOG_DEBUG("Determining deltaz for photon cons.");

    // Number of points used to determine the delta z correction of the photon non-conservation
    N_NFsamples = 100;
    // Change in neutral fraction used to form the gradient for the linear extrapolation of the photon non-conservation correction
    delta_NF = 0.025;
    // A width (in neutral fraction data points) over which we average to try and avoid sharp features in the correction (removes some kinks)
    // Effectively acts as a filtering step
    smoothing_width = 35.;

    // The photon non-conservation correction has a threshold (in terms of neutral fraction; global_params.PhotonConsEnd) for which we switch
    // from using the exact correction between the calibrated (21cmFAST all flag options off) to analytic expression to some extrapolation.
// This threshold is required due to the behaviour of 21cmFAST at very low neutral fractions, which cause extreme behaviour with recombinations on
    // A lot of the steps and choices are not completely robust, just chosen to smooth/average the data to have smoother resultant reionisation histories

    // Determine the number of extrapolated points required, if required at all.
    if(calibrated_NF_min < global_params.PhotonConsEnd) {
        // We require extrapolation, set minimum point to the threshold, and extrapolate beyond.
        NF_sample_min = global_params.PhotonConsEnd;

        // Determine the number of extrapolation points (to better smooth the correction) between the threshold (global_params.PhotonConsEnd) and a
        // point close to zero neutral fraction (set by global_params.PhotonConsAsymptoteTo)
        // Choice is to get the delta neutral fraction between extrapolated points to be similar to the cadence in the exact correction
        if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
            N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - calibrated_NF_min)/( global_params.PhotonConsStart - NF_sample_min );
        }
        else {
            N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - global_params.PhotonConsAsymptoteTo)/( global_params.PhotonConsStart - NF_sample_min );
        }
        N_extrapolated = (int)floor( N_extrapolated ) - 1; // Minus one as the zero point is added below
    }
    else {
        // No extrapolation required, neutral fraction never reaches zero
        NF_sample_min = calibrated_NF_min;
        N_extrapolated = 0;
    }

    // Determine the bin width for the sampling of the neutral fraction for the correction
    bin_width = ( global_params.PhotonConsStart - NF_sample_min )/((float)N_NFsamples - 1.);

    // allocate memory for arrays required to determine the photon non-conservation correction
    deltaz = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
    deltaz_smoothed = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
    NeutralFractions = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));

    // Go through and fill the data points (neutral fraction and corresponding delta z between the calibrated and analytic curves).
    for(i=0;i<N_NFsamples;i++) {

        NF_sample = NF_sample_min + bin_width*(float)i;

        // Determine redshift given a neutral fraction for the calibration curve
        z_at_NFHist(NF_sample,&(temp));
        z_cal = temp;

        // Determine redshift given a neutral fraction for the analytic curve
        z_at_Q(1. - NF_sample,&(temp));
        z_analytic = temp;

        // delta z at this neutral fraction; index offset leaves room for the
        // extrapolated points [1..N_extrapolated] and the end-point [0] below.
        deltaz[i+1+N_extrapolated] = fabs( z_cal - z_analytic );
        NeutralFractions[i+1+N_extrapolated] = NF_sample;
    }

    // Determining the end-point (lowest neutral fraction) for the photon non-conservation correction
    if(calibrated_NF_min >= global_params.PhotonConsEnd) {

        increasing_val = 0;
        counter = 0;

        // Check if all the values of delta z are increasing
        for(i=0;i<(N_NFsamples-1);i++) {
            if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) {
                counter += 1;
            }
        }
        // If all the values of delta z are increasing, then some of the smoothing of the correction done below cannot be performed
        if(counter==(N_NFsamples-1)) {
            increasing_val = 1;
        }

        // Since we never have reionisation, need to set an appropriate end-point for the correction
        // Take some fraction of the previous point to determine the end-point
        NeutralFractions[0] = 0.999*NF_sample_min;
        if(increasing_val) {
            // Values of delta z are always increasing with decreasing neutral fraction thus make the last point slightly larger
            deltaz[0] = 1.001*deltaz[1];
        }
        else {
            // Values of delta z are always decreasing with decreasing neutral fraction thus make the last point slightly smaller
            deltaz[0] = 0.999*deltaz[1];
        }
    }
    else {

        // Ok, we are going to be extrapolating the photon non-conservation (delta z) beyond the threshold
        // Construct a linear curve for the analytic function to extrapolate to the new endpoint
        // The choice for doing so is to ensure the corrected reionisation history is mostly smooth, and doesn't
        // artificially result in kinks due to switching between how the delta z should be calculated
        z_at_Q(1. - (NeutralFractions[1+N_extrapolated] + delta_NF),&(temp));
        z_analytic = temp;

        z_at_Q(1. - NeutralFractions[1+N_extrapolated],&(temp));
        z_analytic_2 = temp;

        // determine the linear curve
        // Multiplication by 1.1 is arbitrary but effectively smooths out most kinks observed in the resultant corrected reionisation histories
        gradient_analytic = 1.1*( delta_NF )/( z_analytic - z_analytic_2 );
        const_offset = ( NeutralFractions[1+N_extrapolated] + delta_NF ) - gradient_analytic * z_analytic;

        // determine the extrapolation end point
        if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
            extrapolated_value = calibrated_NF_min;
        }
        else {
            extrapolated_value = global_params.PhotonConsAsymptoteTo;
        }

        // calculate the delta z for the extrapolated end point
        z_at_NFHist(extrapolated_value,&(temp));
        z_cal = temp;

        z_analytic_at_endpoint = ( extrapolated_value - const_offset )/gradient_analytic ;

        deltaz[0] = fabs( z_cal - z_analytic_at_endpoint );
        NeutralFractions[0] = extrapolated_value;

        // If performing extrapolation, add in all the extrapolated points between the end-point and the threshold to end the correction (global_params.PhotonConsEnd)
        for(i=0;i<N_extrapolated;i++) {
            if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
                NeutralFractions[i+1] = calibrated_NF_min + (NF_sample_min - calibrated_NF_min)*(float)(i+1)/((float)N_extrapolated + 1.);
            }
            else {
                NeutralFractions[i+1] = global_params.PhotonConsAsymptoteTo + (NF_sample_min - global_params.PhotonConsAsymptoteTo)*(float)(i+1)/((float)N_extrapolated + 1.);
            }

            // Linearly interpolate delta z between the extrapolated end-point and the first exact point
            deltaz[i+1] = deltaz[0] + ( deltaz[1+N_extrapolated] - deltaz[0] )*(float)(i+1)/((float)N_extrapolated + 1.);
        }
    }

    // We have added the extrapolated values, now check if they are all increasing or not (again, to determine whether or not to try and smooth the corrected curve
    increasing_val = 0;
    counter = 0;

    for(i=0;i<(N_NFsamples-1);i++) {
        if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) {
            counter += 1;
        }
    }
    if(counter==(N_NFsamples-1)) {
        increasing_val = 1;
    }

    // For some models, the resultant delta z for extremely high neutral fractions ( > 0.95) seem to oscillate or sometimes drop in value.
    // This goes through and checks if this occurs, and tries to smooth this out
    // This doesn't occur very often, but can cause an artificial drop in the reionisation history (neutral fraction value) connecting the
    // values before/after the photon non-conservation correction starts.
    for(i=0;i<(N_NFsamples+N_extrapolated);i++) {

        val1 = deltaz[i];
        val2 = deltaz[i+1];

        counter = 0;

        // Check if we have a neutral fraction above 0.95, that the values are decreasing (val2 < val1), that we haven't sampled too many points (counter)
        // and that the NF_sample_min is less than around 0.8. That is, if a reasonable fraction of the reionisation history is sampled.
        while( NeutralFractions[i+1] > 0.95 && val2 < val1 && NF_sample_min < 0.8 && counter < 100) {

            NF_sample = global_params.PhotonConsStart - 0.001*(counter+1);

            // Determine redshift given a neutral fraction for the calibration curve
            z_at_NFHist(NF_sample,&(temp));
            z_cal = temp;

            // Determine redshift given a neutral fraction for the analytic curve
            z_at_Q(1. - NF_sample,&(temp));
            z_analytic = temp;

            // Determine the delta z
            val2 = fabs( z_cal - z_analytic );
            deltaz[i+1] = val2;
            counter += 1;

            // If after 100 samplings we couldn't get the value to increase (like it should), just modify it from the previous point.
            if(counter==100) {
                deltaz[i+1] = deltaz[i] * 1.01;
            }
        }
    }

    // Store the data in its intermediate state before averaging
    for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) {
        deltaz_smoothed[i] = deltaz[i];
    }

    // If we are not increasing for all values, we can smooth out some features in delta z when connecting the extrapolated delta z values
    // compared to those from the exact correction (i.e. when we cross the threshold).
    if(!increasing_val) {

        for(i=0;i<(N_NFsamples+N_extrapolated);i++) {

            // NOTE(review): val1 is taken from deltaz[0] rather than deltaz[i];
            // this looks asymmetric with the similar loop above — confirm
            // against the upstream implementation before changing.
            val1 = deltaz[0];
            val2 = deltaz[i+1];

            counter = 0;

            // Try and find a point which can be used to smooth out any dip in delta z as a function of neutral fraction.
            // It can be flat, then drop, then increase. This smooths over this drop (removes a kink in the resultant reionisation history).
            // Choice of 75 is somewhat arbitrary
            // NOTE(review): the `||` below keeps the loop running once the
            // look-ahead index exceeds the array extent, so deltaz[i+1+counter]
            // may read past the end of the allocation — verify intent (`&&`?).
            while(val2 < val1 && (counter < 75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated))) {

                counter += 1;
                val2 = deltaz[i+1+counter];

                deltaz_smoothed[i+1] = ( val1 + deltaz[1+(i+1)+counter] )/2.;
            }

            if(counter==75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated)) {
                deltaz_smoothed[i+1] = deltaz[i+1];
            }
        }
    }

    // Here we effectively filter over the delta z as a function of neutral fraction to try and minimise any possible kinks etc. in the functional curve.
    for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) {

        // We are at the end-points, cannot smooth
        if(i==0 || i==(N_NFsamples+N_extrapolated)) {
            deltaz[i] = deltaz_smoothed[i];
        }
        else {
            deltaz[i] = 0.;

            // We are symmetrically smoothing, making sure we have the same number of data points either side of the point we are filtering over
            // This determines the filter width when close to the edge of the data ranges
            if( (i - (int)floor(smoothing_width/2.) ) < 0) {
                smoothing_int = 2*( i ) + (int)((int)smoothing_width%2);
            }
            else if( (i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) > (N_NFsamples + N_extrapolated) ) {
                smoothing_int = ((int)smoothing_width - 1) - 2*((i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) - (N_NFsamples + N_extrapolated) ) + (int)((int)smoothing_width%2);
            }
            else {
                smoothing_int = (int)smoothing_width;
            }

            // Average (filter) over the delta z values to smooth the result
            counter = 0;
            for(j=0;j<(int)smoothing_width;j++) {
                if(((i - (int)floor((float)smoothing_int/2.) + j)>=0) && ((i - (int)floor((float)smoothing_int/2.)
+ j) <= (N_NFsamples + N_extrapolated + 1)) && counter < smoothing_int ) {
                    deltaz[i] += deltaz_smoothed[i - (int)floor((float)smoothing_int/2.) + j];
                    counter += 1;
                }
            }

            deltaz[i] /= (float)counter;
        }
    }

    N_deltaz = N_NFsamples + N_extrapolated + 1;

    // Now, we can construct the spline of the photon non-conservation correction (delta z as a function of neutral fraction)
    deltaz_spline_for_photoncons_acc = gsl_interp_accel_alloc ();
    deltaz_spline_for_photoncons = gsl_spline_alloc (gsl_interp_linear, N_NFsamples + N_extrapolated + 1);

    gsl_set_error_handler_off();
    int gsl_status;
    gsl_status = gsl_spline_init(deltaz_spline_for_photoncons, NeutralFractions, deltaz, N_NFsamples + N_extrapolated + 1);
    GSL_ERROR(gsl_status);
}

// Applies the photon non-conservation correction: given the sampled *redshift,
// shifts it by the spline-interpolated delta z so the calibrated history
// matches the analytic curve. *stored_redshift keeps the original value and
// *absolute_delta_z receives the applied shift.
// NOTE(review): declared to return float but contains no return statement —
// callers must not use the return value; confirm against upstream (likely void).
float adjust_redshifts_for_photoncons(
    struct AstroParams *astro_params, struct FlagOptions *flag_options, float *redshift,
    float *stored_redshift, float *absolute_delta_z
) {

    int i, new_counter;
    double temp;
    float required_NF, adjusted_redshift, future_z, gradient_extrapolation, const_extrapolation, temp_redshift, check_required_NF;

    LOG_DEBUG("Adjusting redshifts for photon cons.");

    if(*redshift < global_params.PhotonConsEndCalibz) {
        LOG_ERROR(
            "You have passed a redshift (z = %f) that is lower than the enpoint of the photon non-conservation correction "\
            "(global_params.PhotonConsEndCalibz = %f). If this behaviour is desired then set global_params.PhotonConsEndCalibz "\
            "to a value lower than z = %f.",*redshift,global_params.PhotonConsEndCalibz,*redshift
        );
//        Throw(ParameterError);
        Throw(PhotonConsError);
    }

    // Determine the neutral fraction (filling factor) of the analytic calibration expression given the current sampled redshift
    Q_at_z(*redshift, &(temp));
    required_NF = 1.0 - (float)temp;

    // Find which redshift we need to sample in order for the calibration reionisation history to match the analytic expression
    if(required_NF > global_params.PhotonConsStart) {
        // We haven't started ionising yet, so keep redshifts the same
        adjusted_redshift = *redshift;

        *absolute_delta_z = 0.;
    }
    else if(required_NF<=global_params.PhotonConsEnd) {
        // We have gone beyond the threshold for the end of the photon non-conservation correction
        // Deemed to be roughly where the calibration curve starts to approach the analytic expression

        if(FirstNF_Estimate <= 0. && required_NF <= 0.0) {
            // Reionisation has already happened well before the calibration
            adjusted_redshift = *redshift;
        }
        else {
            // We have crossed the NF threshold for the photon conservation correction so now set to the delta z at the threshold
            if(required_NF < global_params.PhotonConsAsymptoteTo) {

                // This counts the number of times we have exceeded the extrapolated point and attempts to modify the delta z
                // to try and make the function a little smoother
                *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, global_params.PhotonConsAsymptoteTo, deltaz_spline_for_photoncons_acc);

                new_counter = 0;
                temp_redshift = *redshift;
                check_required_NF = required_NF;

                // Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR
                // In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling
                while( check_required_NF < global_params.PhotonConsAsymptoteTo ) {

                    temp_redshift = ((1.
+ temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.);
                    Q_at_z(temp_redshift, &(temp));
                    check_required_NF = 1.0 - (float)temp;

                    new_counter += 1;
                }

                // Now adjust the final delta_z by some amount to smooth if over successive steps
                if(deltaz[1] > deltaz[0]) {
                    *absolute_delta_z = pow( 0.96 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
                }
                else {
                    *absolute_delta_z = pow( 1.04 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
                }

                // Check if we go into the future (z < 0) and avoid it
                adjusted_redshift = (*redshift) - (*absolute_delta_z);
                if(adjusted_redshift < 0.0) {
                    adjusted_redshift = 0.0;
                }
            }
            else {
                *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, required_NF, deltaz_spline_for_photoncons_acc);
                adjusted_redshift = (*redshift) - (*absolute_delta_z);
            }
        }
    }
    else {

        // Initialise the photon non-conservation correction curve
        if(!photon_cons_allocated) {
            determine_deltaz_for_photoncons();
            photon_cons_allocated = true;
        }

        // We have exceeded even the end-point of the extrapolation
        // Just smooth ever subsequent point
        // Note that this is deliberately tailored to light-cone quantites, but will still work with co-eval cubes
        // Though might produce some very minor discrepancies when comparing outputs.
        if(required_NF < NeutralFractions[0]) {

            new_counter = 0;
            temp_redshift = *redshift;
            check_required_NF = required_NF;

            // Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR
            // In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling
            while( check_required_NF < NeutralFractions[0] ) {

                temp_redshift = ((1. + temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.);
                Q_at_z(temp_redshift, &(temp));
                check_required_NF = 1.0 - (float)temp;

                new_counter += 1;
            }
            if(new_counter > 5) {
                LOG_WARNING(
                    "The photon non-conservation correction has employed an extrapolation for\n"\
                    "more than 5 consecutive snapshots. This can be unstable, thus please check "\
                    "resultant history. Parameters are:\n"
                );
#if LOG_LEVEL >= LOG_WARNING
                writeAstroParams(flag_options, astro_params);
#endif
            }

            // Now adjust the final delta_z by some amount to smooth if over successive steps
            if(deltaz[1] > deltaz[0]) {
                *absolute_delta_z = pow( 0.998 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
            }
            else {
                *absolute_delta_z = pow( 1.002 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
            }

            // Check if we go into the future (z < 0) and avoid it
            adjusted_redshift = (*redshift) - (*absolute_delta_z);
            if(adjusted_redshift < 0.0) {
                adjusted_redshift = 0.0;
            }
        }
        else {
            // Find the corresponding redshift for the calibration curve given the required neutral fraction (filling factor) from the analytic expression
            *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, (double)required_NF, deltaz_spline_for_photoncons_acc);
            adjusted_redshift = (*redshift) - (*absolute_delta_z);
        }
    }

    // keep the original sampled redshift
    *stored_redshift = *redshift;

    // This redshift snapshot now uses the modified redshift following the photon non-conservation correction
    *redshift = adjusted_redshift;
}

// Filling factor Q(z) from the analytic spline; clamped to [0,1] outside the
// tabulated redshift range [Zmin, Zmax].
void Q_at_z(double z, double *splined_value){
    float returned_value;

    if (z >= Zmax) {
        *splined_value = 0.;
    }
    else if (z <= Zmin) {
        *splined_value = 1.;
    }
    else {
        returned_value = gsl_spline_eval(Q_at_z_spline, z, Q_at_z_spline_acc);
        *splined_value = returned_value;
    }
}

// Inverse mapping z(Q) from the analytic spline; throws PhotonConsError when Q
// falls outside the tabulated range [Qmin, Qmax].
void z_at_Q(double Q, double *splined_value){
    float returned_value;

    if (Q < Qmin) {
        LOG_ERROR("The minimum value of Q is %.4e",Qmin);
//        Throw(ParameterError);
        Throw(PhotonConsError);
    }
    else if (Q > Qmax) {
        LOG_ERROR("The maximum value of Q is %.4e. Reionization ends at ~%.4f.",Qmax,Zmin);
        LOG_ERROR("This error can occur if global_params.PhotonConsEndCalibz is close to "\
                  "the final sampled redshift. One can consider a lower value for "\
                  "global_params.PhotonConsEndCalibz to mitigate this");
//        Throw(ParameterError);
        Throw(PhotonConsError);
    }
    else {
        returned_value = gsl_spline_eval(z_at_Q_spline, Q, z_at_Q_spline_acc);
        *splined_value = returned_value;
    }
}

// Releases the analytic Q(z)/z(Q) splines and their accelerators.
void free_Q_value() {
    gsl_spline_free (Q_at_z_spline);
    gsl_interp_accel_free (Q_at_z_spline_acc);
    gsl_spline_free (z_at_Q_spline);
    gsl_interp_accel_free (z_at_Q_spline_acc);
}

// Builds the calibration-curve splines z(xHI) and xHI(z) from the sampled
// reionisation history; trims leading zeros and forces strict monotonicity in
// nf_vals so the spline x-values are valid.
void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline){

    int i, counter, start_index, found_start_index;

    // This takes in the data for the calibration curve for the photon non-conservation correction

    counter = 0;
    start_index = 0;
    found_start_index = 0;

    FinalNF_Estimate = NF_estimate[0];
    FirstNF_Estimate = NF_estimate[NSpline-1];

    // Determine the point in the data where its no longer zero (basically to avoid too many zeros in the spline)
    for(i=0;i<NSpline-1;i++) {
        if(NF_estimate[i+1] > NF_estimate[i]) {
            if(found_start_index == 0) {
                start_index = i;
                found_start_index = 1;
            }
        }
        counter += 1;
    }
    counter = counter - start_index;

    N_calibrated = (counter+1);

    // Store the data points for determining the photon non-conservation correction
    nf_vals = calloc((counter+1),sizeof(double));
    z_vals = calloc((counter+1),sizeof(double));

    calibrated_NF_min = 1.;

    // Store the data, and determine the end point of the input data for estimating the extrapolated results
    for(i=0;i<(counter+1);i++) {
        nf_vals[i] = NF_estimate[start_index+i];
        z_vals[i] = redshifts[start_index+i];
        // At the extreme high redshift end, there can be numerical issues with the solution of the analytic expression
        if(i>0) {
            // Nudge duplicates upward so nf_vals is strictly increasing (spline x-values must be)
            while(nf_vals[i] <= nf_vals[i-1]) {
                nf_vals[i] += 0.000001;
            }
        }

        if(nf_vals[i] < calibrated_NF_min) {
            calibrated_NF_min = nf_vals[i];
        }
    }

    NFHistory_spline_acc = gsl_interp_accel_alloc ();
//    NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1));
    NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1));
gsl_set_error_handler_off(); int gsl_status; gsl_status = gsl_spline_init(NFHistory_spline, nf_vals, z_vals, (counter+1)); GSL_ERROR(gsl_status); z_NFHistory_spline_acc = gsl_interp_accel_alloc (); // z_NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1)); z_NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1)); gsl_status = gsl_spline_init(z_NFHistory_spline, z_vals, nf_vals, (counter+1)); GSL_ERROR(gsl_status); } void z_at_NFHist(double xHI_Hist, double *splined_value){ float returned_value; returned_value = gsl_spline_eval(NFHistory_spline, xHI_Hist, NFHistory_spline_acc); *splined_value = returned_value; } void NFHist_at_z(double z, double *splined_value){ float returned_value; returned_value = gsl_spline_eval(z_NFHistory_spline, z, NFHistory_spline_acc); *splined_value = returned_value; } int ObtainPhotonConsData( double *z_at_Q_data, double *Q_data, int *Ndata_analytic, double *z_cal_data, double *nf_cal_data, int *Ndata_calibration, double *PhotonCons_NFdata, double *PhotonCons_deltaz, int *Ndata_PhotonCons) { int i; *Ndata_analytic = N_analytic; *Ndata_calibration = N_calibrated; *Ndata_PhotonCons = N_deltaz; for(i=0;i<N_analytic;i++) { z_at_Q_data[i] = z_Q[i]; Q_data[i] = Q_value[i]; } for(i=0;i<N_calibrated;i++) { z_cal_data[i] = z_vals[i]; nf_cal_data[i] = nf_vals[i]; } for(i=0;i<N_deltaz;i++) { PhotonCons_NFdata[i] = NeutralFractions[i]; PhotonCons_deltaz[i] = deltaz[i]; } return(0); } void FreePhotonConsMemory() { LOG_DEBUG("Freeing some photon cons memory."); free(deltaz); free(deltaz_smoothed); free(NeutralFractions); free(z_Q); free(Q_value); free(nf_vals); free(z_vals); free_Q_value(); gsl_spline_free (NFHistory_spline); gsl_interp_accel_free (NFHistory_spline_acc); gsl_spline_free (z_NFHistory_spline); gsl_interp_accel_free (z_NFHistory_spline_acc); gsl_spline_free (deltaz_spline_for_photoncons); gsl_interp_accel_free (deltaz_spline_for_photoncons_acc); LOG_DEBUG("Done Freeing photon cons memory."); 
photon_cons_allocated = false; } void FreeTsInterpolationTables(struct FlagOptions *flag_options) { LOG_DEBUG("Freeing some interpolation table memory."); freeSigmaMInterpTable(); if (flag_options->USE_MASS_DEPENDENT_ZETA) { free(z_val); z_val = NULL; free(Nion_z_val); free(z_X_val); z_X_val = NULL; free(SFRD_val); if (flag_options->USE_MINI_HALOS){ free(Nion_z_val_MINI); free(SFRD_val_MINI); } } else{ free(FgtrM_1DTable_linear); } LOG_DEBUG("Done Freeing interpolation table memory."); interpolation_tables_allocated = false; }
GB_unaryop__identity_int64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_bool
// op(A') function:  GB_tran__identity_int64_bool

// C type:   int64_t
// A type:   bool
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise identity with bool -> int64_t typecast; anz entries processed
// in parallel with a static OpenMP schedule.
GrB_Info GB_unop__identity_int64_bool
(
    int64_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose variant; the actual kernel body is generated by including
// GB_unaryop_transpose.c with the macros defined above.
GrB_Info GB_tran__identity_int64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
implicit_blender.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) Blender Foundation
 * All rights reserved.
 */

/** \file
 * \ingroup bph
 */

#include "implicit.h"

#ifdef IMPLICIT_SOLVER_BLENDER

#  include "MEM_guardedalloc.h"

#  include "DNA_scene_types.h"
#  include "DNA_object_types.h"
#  include "DNA_object_force_types.h"
#  include "DNA_meshdata_types.h"
#  include "DNA_texture_types.h"

#  include "BLI_math.h"
#  include "BLI_utildefines.h"

#  include "BKE_cloth.h"
#  include "BKE_collision.h"
#  include "BKE_effect.h"

#  include "BPH_mass_spring.h"

#  ifdef __GNUC__
#    pragma GCC diagnostic ignored "-Wtype-limits"
#  endif

#  ifdef _OPENMP
#    define CLOTH_OPENMP_LIMIT 512
#  endif

//#define DEBUG_TIME

#  ifdef DEBUG_TIME
#    include "PIL_time.h"
#  endif

static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};

#  if 0
#    define C99
#    ifdef C99
/* FIX: the original read `# defineDO_INLINE inline` (missing space after
 * `define`), an invalid directive. Dead code under `#if 0`, so behavior is
 * unchanged, but restore the intended tokens. */
#      define DO_INLINE inline
#    else
#      define DO_INLINE static
#    endif
#  endif /* if 0 */

struct Cloth;

//////////////////////////////////////////
/* fast vector / matrix library, enhancements are welcome :) -dg */
/////////////////////////////////////////

/* DEFINITIONS */
typedef float lfVector[3];
typedef struct fmatrix3x3 {
  float m[3][3];     /* 3x3 matrix */
  unsigned int c, r; /* column and row number */
  /* int pinned; //
is this vertex allowed to move? */
  float n1, n2, n3;    /* three normal vectors for collision constrains */
  unsigned int vcount; /* vertex count */
  unsigned int scount; /* spring count */
} fmatrix3x3;

///////////////////////////
// float[3] vector
///////////////////////////
/* simple vector code */
/* STATUS: verified */
/* to = from * scalar (component-wise scale of a float[3]) */
DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar)
{
  to[0] = from[0] * scalar;
  to[1] = from[1] * scalar;
  to[2] = from[2] * scalar;
}

/* simple v^T * v product ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
/* to[i][j] = vectorA[i] * vectorB[j] */
DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3])
{
  mul_fvector_S(to[0], vectorB, vectorA[0]);
  mul_fvector_S(to[1], vectorB, vectorA[1]);
  mul_fvector_S(to[2], vectorB, vectorA[2]);
}

/* simple v^T * v product with scalar ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
/* to = (vectorA outer vectorB) * aS */
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS)
{
  mul_fvectorT_fvector(to, vectorA, vectorB);

  mul_fvector_S(to[0], to[0], aS);
  mul_fvector_S(to[1], to[1], aS);
  mul_fvector_S(to[2], to[2], aS);
}

#  if 0
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
  printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}

///////////////////////////
// long float vector float (*)[3]
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    print_fvector(fLongVector[i]);
  }
}
#  endif

/* create long vector */
/* Allocates a zero-initialized array of `verts` float[3] entries; caller owns
 * the memory and must release it with del_lfvector(). */
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
  /* TODO: check if memory allocation was successful */
  return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector");
  // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector));
}

/* delete long vector */
DO_INLINE void del_lfvector(float (*fLongVector)[3])
{
if (fLongVector != NULL) { MEM_freeN(fLongVector); // cloth_aligned_free(&MEMORY_BASE, fLongVector); } } /* copy long vector */ DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts) { memcpy(to, from, verts * sizeof(lfVector)); } /* init long vector with float[3] */ DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { copy_v3_v3(fLongVector[i], vector); } } /* zero long vector with float[3] */ DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts) { memset(to, 0.0f, verts * sizeof(lfVector)); } /* multiply long vector with scalar*/ DO_INLINE void mul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { mul_fvector_S(to[i], fLongVector[i], scalar); } } /* multiply long vector with scalar*/ /* A -= B * float */ DO_INLINE void submul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBMUL(to[i], fLongVector[i], scalar); } } /* dot product for big vector */ DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { long i = 0; float temp = 0.0; // XXX brecht, disabled this for now (first schedule line was already disabled), // due to non-commutative nature of floating point ops this makes the sim give // different results each time you run it! 
// schedule(guided, 2) //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT) for (i = 0; i < (long)verts; i++) { temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]); } return temp; } /* A = B + C --> for big vector */ DO_INLINE void add_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /* A = B + C * float --> for big vector */ DO_INLINE void add_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B * float + C * float --> for big vector */ DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float aS, float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS); } } /* A = B - C * float --> for big vector */ DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B - C --> for big vector */ DO_INLINE void sub_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /////////////////////////// // 3x3 matrix /////////////////////////// # if 0 /* printf 3x3 matrix on console: for debug output */ static void print_fmatrix(float m3[3][3]) { printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]); printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]); printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]); } static void 
print_sparse_matrix(fmatrix3x3 *m) { if (m) { unsigned int i; for (i = 0; i < m[0].vcount + m[0].scount; i++) { printf("%d:\n", i); print_fmatrix(m[i].m); } } } # endif # if 0 static void print_lvector(lfVector *v, int numverts) { int i; for (i = 0; i < numverts; ++i) { if (i > 0) printf("\n"); printf("%f,\n", v[i][0]); printf("%f,\n", v[i][1]); printf("%f,\n", v[i][2]); } } # endif # if 0 static void print_bfmatrix(fmatrix3x3 *m) { int tot = m[0].vcount + m[0].scount; int size = m[0].vcount * 3; float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix"); int q, i, j; for (q = 0; q < tot; ++q) { int k = 3 * m[q].r; int l = 3 * m[q].c; for (j = 0; j < 3; ++j) { for (i = 0; i < 3; ++i) { // if (t[k + i + (l + j) * size] != 0.0f) { // printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c); // } if (k == l) { t[k + i + (k + j) * size] += m[q].m[i][j]; } else { t[k + i + (l + j) * size] += m[q].m[i][j]; t[l + j + (k + i) * size] += m[q].m[j][i]; } } } } for (j = 0; j < size; ++j) { if (j > 0 && j % 3 == 0) printf("\n"); for (i = 0; i < size; ++i) { if (i > 0 && i % 3 == 0) printf(" "); implicit_print_matrix_elem(t[i + j * size]); } printf("\n"); } MEM_freeN(t); } # endif /* copy 3x3 matrix */ DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3]) { // memcpy(to, from, sizeof (float) * 9); copy_v3_v3(to[0], from[0]); copy_v3_v3(to[1], from[1]); copy_v3_v3(to[2], from[2]); } /* copy 3x3 matrix */ DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS) { cp_fmatrix(to, ZERO); to[0][0] = aS; to[1][1] = aS; to[2][2] = aS; } # if 0 /* calculate determinant of 3x3 matrix */ DO_INLINE float det_fmatrix(float m[3][3]) { return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2]; } DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3]) { unsigned int i, j; float d; if ((d = det_fmatrix(from)) == 0) { printf("can't build 
inverse"); exit(0); } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { int i1 = (i + 1) % 3; int i2 = (i + 2) % 3; int j1 = (j + 1) % 3; int j2 = (j + 2) % 3; /** Reverse indexes i&j to take transpose. */ to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d; /** * <pre> * if (i == j) { * to[i][j] = 1.0f / from[i][j]; * } * else { * to[i][j] = 0; * } * </pre> */ } } } # endif /* 3x3 matrix multiplied by a scalar */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar) { mul_fvector_S(matrix[0], matrix[0], scalar); mul_fvector_S(matrix[1], matrix[1], scalar); mul_fvector_S(matrix[2], matrix[2], scalar); } /* a vector multiplied by a 3x3 matrix */ /* STATUS: verified */ DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3]) { to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } /* 3x3 matrix multiplied by a vector */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3]) { to[0] = dot_v3v3(matrix[0], from); to[1] = dot_v3v3(matrix[1], from); to[2] = dot_v3v3(matrix[2], from); } /* 3x3 matrix addition with 3x3 matrix */ DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { add_v3_v3v3(to[0], matrixA[0], matrixB[0]); add_v3_v3v3(to[1], matrixA[1], matrixB[1]); add_v3_v3v3(to[2], matrixA[2], matrixB[2]); } /* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */ DO_INLINE void subadd_fmatrixS_fmatrixS( float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS) { VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS); VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS); VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS); } /* A = B - C (3x3 matrix subtraction with 3x3 matrix) */ DO_INLINE void 
sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { sub_v3_v3v3(to[0], matrixA[0], matrixB[0]); sub_v3_v3v3(to[1], matrixA[1], matrixB[1]); sub_v3_v3v3(to[2], matrixA[2], matrixB[2]); } ///////////////////////////////////////////////////////////////// // special functions ///////////////////////////////////////////////////////////////// /* 3x3 matrix multiplied+added by a vector */ /* STATUS: verified */ DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += dot_v3v3(matrix[0], from); to[1] += dot_v3v3(matrix[1], from); to[2] += dot_v3v3(matrix[2], from); } DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3]) { mul_v3_v3fl(r[0], a, b[0]); mul_v3_v3fl(r[1], a, b[1]); mul_v3_v3fl(r[2], a, b[2]); } BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3]) { cross_v3_v3v3(r[0], v, m[0]); cross_v3_v3v3(r[1], v, m[1]); cross_v3_v3v3(r[2], v, m[2]); } BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3]) { r[0][0] = 0.0f; r[1][0] = v[2]; r[2][0] = -v[1]; r[0][1] = -v[2]; r[1][1] = 0.0f; r[2][1] = v[0]; r[0][2] = v[1]; r[1][2] = -v[0]; r[2][2] = 0.0f; } BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f) { r[0][0] += m[0][0] * f; r[0][1] += m[0][1] * f; r[0][2] += m[0][2] * f; r[1][0] += m[1][0] * f; r[1][1] += m[1][1] * f; r[1][2] += m[1][2] * f; r[2][0] += m[2][0] * f; r[2][1] += m[2][1] * f; r[2][2] += m[2][2] * f; } ///////////////////////////////////////////////////////////////// /////////////////////////// // SPARSE SYMMETRIC big matrix with 3x3 matrix entries /////////////////////////// /* 
printf a big matrix on console: for debug output */ # if 0 static void print_bfmatrix(fmatrix3x3 *m3) { unsigned int i = 0; for (i = 0; i < m3[0].vcount + m3[0].scount; i++) { print_fmatrix(m3[i].m); } } # endif BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c) { matrix->r = r; matrix->c = c; } /* create big matrix */ DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs) { // TODO: check if memory allocation was successful */ fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs), "cloth_implicit_alloc_matrix"); int i; temp[0].vcount = verts; temp[0].scount = springs; /* vertex part of the matrix is diagonal blocks */ for (i = 0; i < verts; ++i) { init_fmatrix(temp + i, i, i); } return temp; } /* delete big matrix */ DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix) { if (matrix != NULL) { MEM_freeN(matrix); } } /* copy big matrix */ DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from) { // TODO bounds checking memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount)); } /* init big matrix */ // slow in parallel DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i; for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { cp_fmatrix(matrix[i].m, m3); } } /* init the diagonal of big matrix */ // slow in parallel DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i, j; float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; for (i = 0; i < matrix[0].vcount; i++) { cp_fmatrix(matrix[i].m, m3); } for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) { cp_fmatrix(matrix[j].m, tmatrix); } } /* SPARSE SYMMETRIC multiply big matrix with long vector*/ /* STATUS: verified */ DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector) { unsigned int i = 0; unsigned int vcount = from[0].vcount; lfVector *temp = create_lfvector(vcount); zero_lfvector(to, vcount); # pragma 
omp parallel sections private(i) if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. */ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, int numsprings) { Implicit_Data 
*id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat"); /* process diagonal elements */ id->tfm = create_bfmatrix(numverts, 0); id->A = create_bfmatrix(numverts, numsprings); id->dFdV = create_bfmatrix(numverts, numsprings); id->dFdX = create_bfmatrix(numverts, numsprings); id->S = create_bfmatrix(numverts, 0); id->Pinv = create_bfmatrix(numverts, numsprings); id->P = create_bfmatrix(numverts, numsprings); id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs id->M = create_bfmatrix(numverts, numsprings); id->X = create_lfvector(numverts); id->Xnew = create_lfvector(numverts); id->V = create_lfvector(numverts); id->Vnew = create_lfvector(numverts); id->F = create_lfvector(numverts); id->B = create_lfvector(numverts); id->dV = create_lfvector(numverts); id->z = create_lfvector(numverts); initdiag_bfmatrix(id->bigI, I); return id; } void BPH_mass_spring_solver_free(Implicit_Data *id) { del_bfmatrix(id->tfm); del_bfmatrix(id->A); del_bfmatrix(id->dFdV); del_bfmatrix(id->dFdX); del_bfmatrix(id->S); del_bfmatrix(id->P); del_bfmatrix(id->Pinv); del_bfmatrix(id->bigI); del_bfmatrix(id->M); del_lfvector(id->X); del_lfvector(id->Xnew); del_lfvector(id->V); del_lfvector(id->Vnew); del_lfvector(id->F); del_lfvector(id->B); del_lfvector(id->dV); del_lfvector(id->z); MEM_freeN(id); } /* ==== Transformation from/to root reference frames ==== */ BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { copy_v3_v3(r, v); mul_transposed_m3_v3(data->tfm[index].m, r); } BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { mul_v3_m3v3(r, data->tfm[index].m, v); } BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { float trot[3][3]; copy_m3_m3(trot, data->tfm[index].m); transpose_m3(trot); mul_m3_m3m3(r, trot, m); } BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { 
mul_m3_m3m3(r, data->tfm[index].m, m); } /* ================================ */ DO_INLINE void filter(lfVector *V, fmatrix3x3 *S) { unsigned int i = 0; for (i = 0; i < S[0].vcount; i++) { mul_m3_v3(S[i].m, V[S[i].r]); } } # if 0 /* this version of the CG algorithm does not work very well with partial constraints (where S has non-zero elements) */ static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */; lfVector *q, *d, *tmp, *r; float s, starget, a, s_prev; unsigned int numverts = lA[0].vcount; q = create_lfvector(numverts); d = create_lfvector(numverts); tmp = create_lfvector(numverts); r = create_lfvector(numverts); // zero_lfvector(ldV, CLOTHPARTICLES); filter(ldV, S); add_lfvector_lfvector(ldV, ldV, z, numverts); // r = B - Mul(tmp, A, X); // just use B if X known to be zero cp_lfvector(r, lB, numverts); mul_bfmatrix_lfvector(tmp, lA, ldV); sub_lfvector_lfvector(r, r, tmp, numverts); filter(r, S); cp_lfvector(d, r, numverts); s = dot_lfvector(r, r, numverts); starget = s * sqrtf(conjgrad_epsilon); while (s > starget && conjgrad_loopcount < conjgrad_looplimit) { // Mul(q, A, d); // q = A*d; mul_bfmatrix_lfvector(q, lA, d); filter(q, S); a = s / dot_lfvector(d, q, numverts); // X = X + d*a; add_lfvector_lfvectorS(ldV, ldV, d, a, numverts); // r = r - q*a; sub_lfvector_lfvectorS(r, r, q, a, numverts); s_prev = s; s = dot_lfvector(r, r, numverts); //d = r+d*(s/s_prev); add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts); filter(d, S); conjgrad_loopcount++; } /* conjgrad_lasterror = s; */ /* UNUSED */ del_lfvector(q); del_lfvector(d); del_lfvector(tmp); del_lfvector(r); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable 
} # endif static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, ImplicitSolverResult *result) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.01f; unsigned int numverts = lA[0].vcount; lfVector *fB = create_lfvector(numverts); lfVector *AdV = create_lfvector(numverts); lfVector *r = create_lfvector(numverts); lfVector *c = create_lfvector(numverts); lfVector *q = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); float bnorm2, delta_new, delta_old, delta_target, alpha; cp_lfvector(ldV, z, numverts); /* d0 = filter(B)^T * P * filter(B) */ cp_lfvector(fB, lB, numverts); filter(fB, S); bnorm2 = dot_lfvector(fB, fB, numverts); delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2; /* r = filter(B - A * dV) */ mul_bfmatrix_lfvector(AdV, lA, ldV); sub_lfvector_lfvector(r, lB, AdV, numverts); filter(r, S); /* c = filter(P^-1 * r) */ cp_lfvector(c, r, numverts); filter(c, S); /* delta = r^T * c */ delta_new = dot_lfvector(r, c, numverts); # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== A ====\n"); print_bfmatrix(lA); printf("==== z ====\n"); print_lvector(z, numverts); printf("==== B ====\n"); print_lvector(lB, numverts); printf("==== S ====\n"); print_bfmatrix(S); # endif while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) { mul_bfmatrix_lfvector(q, lA, c); filter(q, S); alpha = delta_new / dot_lfvector(c, q, numverts); add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts); add_lfvector_lfvectorS(r, r, q, -alpha, numverts); /* s = P^-1 * r */ cp_lfvector(s, r, numverts); delta_old = delta_new; delta_new = dot_lfvector(r, s, numverts); add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts); filter(c, S); conjgrad_loopcount++; } # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== dV ====\n"); print_lvector(ldV, numverts); printf("========\n"); # endif del_lfvector(fB); 
del_lfvector(AdV); del_lfvector(r); del_lfvector(c); del_lfvector(q); del_lfvector(s); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS : BPH_SOLVER_NO_CONVERGENCE; result->iterations = conjgrad_loopcount; result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f; return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable } # if 0 // block diagonalizer DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int i = 0; // Take only the diagonal blocks of A // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT) for (i = 0; i < lA[0].vcount; i++) { // block diagonalizer cp_fmatrix(P[i].m, lA[i].m); inverse_fmatrix(Pinv[i].m, P[i].m); } } # if 0 // version 1.3 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0; float conjgrad_epsilon = 0.0001; // 0.2 is dt for steps=5 lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); 
add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif // version 1.4 static int cg_filtered_pre(lfVector *dv, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, fmatrix3x3 *P, fmatrix3x3 *Pinv, fmatrix3x3 *bigI) { unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100; float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0; lfVector *r = create_lfvector(numverts); lfVector *p = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); lfVector *h = create_lfvector(numverts); lfVector *bhat = create_lfvector(numverts); lfVector *btemp = create_lfvector(numverts); BuildPPinv(lA, P, Pinv); initdiag_bfmatrix(bigI, I); sub_bfmatrix_Smatrix(bigI, bigI, S); // x = Sx_0+(I-S)z filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); // b_hat = S(b-A(I-S)z) mul_bfmatrix_lfvector(r, lA, z); mul_bfmatrix_lfvector(bhat, bigI, r); sub_lfvector_lfvector(bhat, lB, bhat, numverts); // r = S(b-Ax) mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); // p = SP^-1r mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); // delta0 = bhat^TP^-1bhat mul_prevfmatrix_lfvector(btemp, Pinv, bhat); delta0 = dot_lfvector(bhat, btemp, numverts); // deltaNew = r^TP deltaNew = dot_lfvector(r, p, numverts); # if 0 filter(dv, S); add_lfvector_lfvector(dv, dv, z, numverts); mul_bfmatrix_lfvector(r, lA, dv); sub_lfvector_lfvector(r, lB, r, numverts); filter(r, S); mul_prevfmatrix_lfvector(p, Pinv, r); filter(p, S); deltaNew = dot_lfvector(r, p, 
numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # endif # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif tol = (0.01 * 0.2); while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(btemp); del_lfvector(bhat); del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); // printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result) { unsigned int numverts = data->dFdV[0].vcount; lfVector *dFdXmV = create_lfvector(numverts); zero_lfvector(data->dV, numverts); cp_bfmatrix(data->A, data->M); subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt)); mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V); add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif cg_filtered(data->dV, data->A, data->B, data->z, data->S, result); /* conjugate gradient algorithm to solve Ax=b */ // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI); # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered calc time: %f\n", (float)(end - start)); # endif // advance velocities add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts); del_lfvector(dFdXmV); return result->status == BPH_SOLVER_SUCCESS; } bool 
BPH_mass_spring_solve_positions(Implicit_Data *data, float dt) { int numverts = data->M[0].vcount; // advance positions add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts); return true; } void BPH_mass_spring_apply_result(Implicit_Data *data) { int numverts = data->M[0].vcount; cp_lfvector(data->X, data->Xnew, numverts); cp_lfvector(data->V, data->Vnew, numverts); } void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass) { unit_m3(data->M[index].m); mul_m3_fl(data->M[index].m, mass); } void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3]) { # ifdef CLOTH_ROOT_FRAME copy_m3_m3(data->tfm[index].m, tfm); # else unit_m3(data->tfm[index].m); (void)tfm; # endif } void BPH_mass_spring_set_motion_state(Implicit_Data *data, int index, const float x[3], const float v[3]) { world_to_root_v3(data, index, data->X[index], x); world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->X[index], x); } void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->V[index], v); } void BPH_mass_spring_get_motion_state(struct Implicit_Data *data, int index, float x[3], float v[3]) { if (x) root_to_world_v3(data, index, x, data->X[index]); if (v) root_to_world_v3(data, index, v, data->V[index]); } void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->X[index]); } void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3]) { root_to_world_v3(data, index, x, data->Xnew[index]); } void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3]) { world_to_root_v3(data, index, data->Xnew[index], x); } void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3]) { root_to_world_v3(data, 
index, v, data->Vnew[index]); } void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3]) { world_to_root_v3(data, index, data->Vnew[index], v); } /* -------------------------------- */ static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2) { int s = data->M[0].vcount + data->num_blocks; /* index from array start */ BLI_assert(s < data->M[0].vcount + data->M[0].scount); ++data->num_blocks; /* tfm and S don't have spring entries (diagonal blocks only) */ init_fmatrix(data->bigI + s, v1, v2); init_fmatrix(data->M + s, v1, v2); init_fmatrix(data->dFdX + s, v1, v2); init_fmatrix(data->dFdV + s, v1, v2); init_fmatrix(data->A + s, v1, v2); init_fmatrix(data->P + s, v1, v2); init_fmatrix(data->Pinv + s, v1, v2); return s; } void BPH_mass_spring_clear_constraints(Implicit_Data *data) { int i, numverts = data->S[0].vcount; for (i = 0; i < numverts; ++i) { unit_m3(data->S[i].m); zero_v3(data->z[i]); } } void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3]) { zero_m3(data->S[index].m); world_to_root_v3(data, index, data->z[index], dV); } void BPH_mass_spring_add_constraint_ndof1( Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3]) { float m[3][3], p[3], q[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); world_to_root_v3(data, index, q, c2); mul_fvectorT_fvector(cmat, q, q); sub_m3_m3m3(m, m, cmat); /* XXX not sure but multiplication should work here */ copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data, int index, const float c1[3], const float dV[3]) { float m[3][3], p[3], u[3], cmat[3][3]; world_to_root_v3(data, index, p, c1); mul_fvectorT_fvector(cmat, p, p); sub_m3_m3m3(m, I, cmat); 
copy_m3_m3(data->S[index].m, m); // mul_m3_m3m3(data->S[index].m, data->S[index].m, m); world_to_root_v3(data, index, u, dV); add_v3_v3(data->z[index], u); } void BPH_mass_spring_clear_forces(Implicit_Data *data) { int numverts = data->M[0].vcount; zero_lfvector(data->F, numverts); init_bfmatrix(data->dFdX, ZERO); init_bfmatrix(data->dFdV, ZERO); data->num_blocks = 0; } void BPH_mass_spring_force_reference_frame(Implicit_Data *data, int index, const float acceleration[3], const float omega[3], const float domega_dt[3], float mass) { # ifdef CLOTH_ROOT_FRAME float acc[3], w[3], dwdt[3]; float f[3], dfdx[3][3], dfdv[3][3]; float euler[3], coriolis[3], centrifugal[3], rotvel[3]; float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3]; world_to_root_v3(data, index, acc, acceleration); world_to_root_v3(data, index, w, omega); world_to_root_v3(data, index, dwdt, domega_dt); cross_v3_v3v3(euler, dwdt, data->X[index]); cross_v3_v3v3(coriolis, w, data->V[index]); mul_v3_fl(coriolis, 2.0f); cross_v3_v3v3(rotvel, w, data->X[index]); cross_v3_v3v3(centrifugal, w, rotvel); sub_v3_v3v3(f, acc, euler); sub_v3_v3(f, coriolis); sub_v3_v3(f, centrifugal); mul_v3_fl(f, mass); /* F = m * a */ cross_v3_identity(deuler, dwdt); cross_v3_identity(dcoriolis, w); mul_m3_fl(dcoriolis, 2.0f); cross_v3_identity(drotvel, w); cross_m3_v3m3(dcentrifugal, w, drotvel); add_m3_m3m3(dfdx, deuler, dcentrifugal); negate_m3(dfdx); mul_m3_fl(dfdx, mass); copy_m3_m3(dfdv, dcoriolis); negate_m3(dfdv); mul_m3_fl(dfdv, mass); add_v3_v3(data->F[index], f); add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx); add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv); # else (void)data; (void)index; (void)acceleration; (void)omega; (void)domega_dt; # endif } void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3]) { /* force = mass * acceleration (in this case: gravity) */ float f[3]; world_to_root_v3(data, index, f, g); mul_v3_fl(f, mass); 
add_v3_v3(data->F[index], f);
}

/* Apply simple air drag: F -= drag * V for every vertex, with the matching
 * contribution -drag * I added to each diagonal dFdV block. */
void BPH_mass_spring_force_drag(Implicit_Data *data, float drag)
{
  int i, numverts = data->M[0].vcount;
  for (i = 0; i < numverts; i++) {
    float tmp[3][3];

    /* NB: uses root space velocity, no need to transform */
    madd_v3_v3fl(data->F[i], data->V[i], -drag);

    copy_m3_m3(tmp, I);
    mul_m3_fl(tmp, -drag);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp);
  }
}

/* Add an external force (with its position/velocity jacobians) to vertex i,
 * transforming all three from world space into the root frame first. */
void BPH_mass_spring_force_extern(
    struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  float tf[3], tdfdx[3][3], tdfdv[3][3];
  world_to_root_v3(data, i, tf, f);
  world_to_root_m3(data, i, tdfdx, dfdx);
  world_to_root_m3(data, i, tdfdv, dfdv);

  add_v3_v3(data->F[i], tf);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv);
}

/* Writes the normalized triangle normal to `nor` and returns the length of
 * the edge cross product, i.e. twice the triangle area -- NOTE(review):
 * callers appear to use this only as a relative weight, confirm the factor
 * of 2 is intentional. */
static float calc_nor_area_tri(float nor[3], const float v1[3], const float v2[3], const float v3[3])
{
  float n1[3], n2[3];
  sub_v3_v3v3(n1, v1, v2);
  sub_v3_v3v3(n2, v2, v3);
  cross_v3_v3v3(nor, n1, n2);
  return normalize_v3(nor);
}

/* XXX does not support force jacobians yet, since the effector system does not provide them either */
/* Distribute the wind force on a face equally onto its three vertices,
 * projecting each vertex' wind vector onto the face normal. */
void BPH_mass_spring_force_face_wind(
    Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
  const float effector_scale = 0.02f;
  float win[3], nor[3], area;
  float factor;

  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  factor = effector_scale * area / 3.0f;

  world_to_root_v3(data, v1, win, winvec[v1]);
  madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v2, win, winvec[v2]);
  madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v3, win, winvec[v3]);
  madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor));
}

static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX
arbitrary value, corresponds to effect of air density */ float cos_alpha, sin_alpha, cross_section; float windlen = len_v3(wind); if (windlen == 0.0f) { zero_v3(f); return; } /* angle of wind direction to edge */ cos_alpha = dot_v3v3(wind, dir) / windlen; sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha); cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha); mul_v3_v3fl(f, wind, density * cross_section); } void BPH_mass_spring_force_edge_wind( Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3]) { float win[3], dir[3], length; float f[3], dfdx[3][3], dfdv[3][3]; sub_v3_v3v3(dir, data->X[v1], data->X[v2]); length = normalize_v3(dir); world_to_root_v3(data, v1, win, winvec[v1]); edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv); add_v3_v3(data->F[v1], f); world_to_root_v3(data, v2, win, winvec[v2]); edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv); add_v3_v3(data->F[v2], f); } void BPH_mass_spring_force_vertex_wind(Implicit_Data *data, int v, float UNUSED(radius), const float (*winvec)[3]) { const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */ float wind[3]; float f[3]; world_to_root_v3(data, v, wind, winvec[v]); mul_v3_v3fl(f, wind, density); add_v3_v3(data->F[v], f); } BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k) { // dir is unit length direction, rest is spring's restlength, k is spring constant. //return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k; outerproduct(to, dir, dir); sub_m3_m3m3(to, I, to); mul_m3_fl(to, (L / length)); sub_m3_m3m3(to, to, I); mul_m3_fl(to, k); } /* unused */ # if 0 BLI_INLINE void dfdx_damp(float to[3][3], const float dir[3], float length, const float vel[3], float rest, float damping) { // inner spring damping vel is the relative velocity of the endpoints. 
// return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest))); mul_fvectorT_fvector(to, dir, dir); sub_fmatrix_fmatrix(to, I, to); mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest)))); } # endif BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping) { // derivative of force wrt velocity outerproduct(to, dir, dir); mul_m3_fl(to, -damping); } BLI_INLINE float fb(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; float xxxx = xxx * x; return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f); } BLI_INLINE float fbderiv(float length, float L) { float x = length / L; float xx = x * x; float xxx = xx * x; return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f); } BLI_INLINE float fbstar(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) return fbstar_fl; else return tempfb_fl; } // function to calculae bending spring force (taken from Choi & Co) BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb) { float tempfb_fl = kb * fb(length, L); float fbstar_fl = cb * (length - L); if (tempfb_fl < fbstar_fl) { return -cb; } else { return -kb * fbderiv(length, L); } } /* calculate elonglation */ BLI_INLINE bool spring_length(Implicit_Data *data, int i, int j, float r_extent[3], float r_dir[3], float *r_length, float r_vel[3]) { sub_v3_v3v3(r_extent, data->X[j], data->X[i]); sub_v3_v3v3(r_vel, data->V[j], data->V[i]); *r_length = len_v3(r_extent); if (*r_length > ALMOST_ZERO) { # if 0 if (length > L) { if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) && (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) { // cut spring! 
s->flags |= CSPRING_FLAG_DEACTIVATE; return false; } } # endif mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length)); } else { zero_v3(r_dir); } return true; } BLI_INLINE void apply_spring( Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3]) { int block_ij = BPH_mass_spring_add_block(data, i, j); add_v3_v3(data->F[i], f); sub_v3_v3(data->F[j], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx); sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv); sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv); } bool BPH_mass_spring_force_spring_linear(Implicit_Data *data, int i, int j, float restlen, float stiffness_tension, float damping_tension, float stiffness_compression, float damping_compression, bool resist_compress, bool new_compress, float clamp_force) { float extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; float damping = 0; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); /* This code computes not only the force, but also its derivative. * Zero derivative effectively disables the spring for the implicit solver. * Thus length > restlen makes cloth unconstrained at the start of simulation. */ if ((length >= restlen && length > 0) || resist_compress) { float stretch_force; damping = damping_tension; stretch_force = stiffness_tension * (length - restlen); if (clamp_force > 0.0f && stretch_force > clamp_force) { stretch_force = clamp_force; } mul_v3_v3fl(f, dir, stretch_force); dfdx_spring(dfdx, dir, length, restlen, stiffness_tension); } else if (new_compress) { /* This is based on the Choi and Ko bending model, which works surprisingly well for compression. 
*/ float kb = stiffness_compression; float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */ damping = damping_compression; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); } else { return false; } madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdv_damp(dfdv, dir, damping); apply_spring(data, i, j, f, dfdx, dfdv); return true; } /* See "Stable but Responsive Cloth" (Choi, Ko 2005) */ bool BPH_mass_spring_force_spring_bending( Implicit_Data *data, int i, int j, float restlen, float kb, float cb) { float extent[3], length, dir[3], vel[3]; // calculate elonglation spring_length(data, i, j, extent, dir, &length, vel); if (length < restlen) { float f[3], dfdx[3][3], dfdv[3][3]; mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb)); outerproduct(dfdx, dir, dir); mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb)); /* XXX damping not supported */ zero_m3(dfdv); apply_spring(data, i, j, f, dfdx, dfdv); return true; } else { return false; } } BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3]) { float fact = 1.0f / (float)len; zero_v3(r_avg); for (int i = 0; i < len; i++) { madd_v3_v3fl(r_avg, data[inds[i]], fact); } } BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3]) { float mid[3]; poly_avg(data, inds, len, mid); normal_tri_v3(r_dir, data[i], data[j], mid); } BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3]) { r_avg[0] = (data[i][0] + data[j][0]) * 0.5f; r_avg[1] = (data[i][1] + data[j][1]) * 0.5f; r_avg[2] = (data[i][2] + data[j][2]) * 0.5f; } BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3]) { sub_v3_v3v3(r_dir, data[i], data[j]); normalize_v3(r_dir); } BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3]) { float cos, sin; float tmp[3]; cos = dot_v3v3(dir_a, dir_b); cross_v3_v3v3(tmp, dir_a, dir_b); sin = 
dot_v3v3(tmp, dir_e); return atan2f(sin, cos); } BLI_INLINE void spring_angle(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float r_dir_a[3], float r_dir_b[3], float *r_angle, float r_vel_a[3], float r_vel_b[3]) { float dir_e[3], vel_e[3]; poly_norm(data->X, j, i, i_a, len_a, r_dir_a); poly_norm(data->X, i, j, i_b, len_b, r_dir_b); edge_norm(data->X, i, j, dir_e); *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e); poly_avg(data->V, i_a, len_a, r_vel_a); poly_avg(data->V, i_b, len_b, r_vel_b); edge_avg(data->V, i, j, vel_e); sub_v3_v3(r_vel_a, vel_e); sub_v3_v3(r_vel_b, vel_e); } /* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps in Cloth Simulation". */ bool BPH_mass_spring_force_spring_angular(Implicit_Data *data, int i, int j, int *i_a, int *i_b, int len_a, int len_b, float restang, float stiffness, float damping) { float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3]; float f_a[3], f_b[3], f_e[3]; float force; int x; spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b); /* spring force */ force = stiffness * (angle - restang); /* damping force */ force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b)); mul_v3_v3fl(f_a, dir_a, force / len_a); mul_v3_v3fl(f_b, dir_b, force / len_b); for (x = 0; x < len_a; x++) { add_v3_v3(data->F[i_a[x]], f_a); } for (x = 0; x < len_b; x++) { add_v3_v3(data->F[i_b[x]], f_b); } mul_v3_v3fl(f_a, dir_a, force * 0.5f); mul_v3_v3fl(f_b, dir_b, force * 0.5f); add_v3_v3v3(f_e, f_a, f_b); sub_v3_v3(data->F[i], f_e); sub_v3_v3(data->F[j], f_e); return true; } /* Jacobian of a direction vector. * Basically the part of the differential orthogonal to the direction, * inversely proportional to the length of the edge. 
* * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij */ BLI_INLINE void spring_grad_dir( Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3]) { float length; sub_v3_v3v3(edge, data->X[j], data->X[i]); length = normalize_v3_v3(dir, edge); if (length > ALMOST_ZERO) { outerproduct(grad_dir, dir, dir); sub_m3_m3m3(grad_dir, I, grad_dir); mul_m3_fl(grad_dir, 1.0f / length); } else { zero_m3(grad_dir); } } BLI_INLINE void spring_hairbend_forces(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, const float dx[3], const float dv[3], float r_f[3]) { float edge_ij[3], dir_ij[3]; float edge_jk[3], dir_jk[3]; float vel_ij[3], vel_jk[3], vel_ortho[3]; float f_bend[3], f_damp[3]; float fk[3]; float dist[3]; zero_v3(fk); sub_v3_v3v3(edge_ij, data->X[j], data->X[i]); if (q == i) sub_v3_v3(edge_ij, dx); if (q == j) add_v3_v3(edge_ij, dx); normalize_v3_v3(dir_ij, edge_ij); sub_v3_v3v3(edge_jk, data->X[k], data->X[j]); if (q == j) sub_v3_v3(edge_jk, dx); if (q == k) add_v3_v3(edge_jk, dx); normalize_v3_v3(dir_jk, edge_jk); sub_v3_v3v3(vel_ij, data->V[j], data->V[i]); if (q == i) sub_v3_v3(vel_ij, dv); if (q == j) add_v3_v3(vel_ij, dv); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); if (q == j) sub_v3_v3(vel_jk, dv); if (q == k) add_v3_v3(vel_jk, dv); /* bending force */ sub_v3_v3v3(dist, goal, edge_jk); mul_v3_v3fl(f_bend, dist, stiffness); add_v3_v3(fk, f_bend); /* damping force */ madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); mul_v3_v3fl(f_damp, vel_ortho, damping); sub_v3_v3(fk, f_damp); copy_v3_v3(r_f, fk); } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdx[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; 
float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; ++a) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f); copy_v3_v3(dfdx[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f); sub_v3_v3(dfdx[a], f); for (b = 0; b < 3; ++b) { dfdx[a][b] /= delta; } } } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdv[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; ++a) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f); copy_v3_v3(dfdv[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f); sub_v3_v3(dfdv[a], f); for (b = 0; b < 3; ++b) { dfdv[a][b] /= delta; } } } /* Angular spring that pulls the vertex toward the local target * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a) */ bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data, int i, int j, int k, const float target[3], float stiffness, float damping) { float goal[3]; float fj[3], fk[3]; float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3]; const float vecnull[3] = {0.0f, 0.0f, 0.0f}; int block_ij = 
BPH_mass_spring_add_block(data, i, j); int block_jk = BPH_mass_spring_add_block(data, j, k); int block_ik = BPH_mass_spring_add_block(data, i, k); world_to_root_v3(data, j, goal, target); spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk); negate_v3_v3(fj, fk); /* counterforce */ spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk); copy_m3_m3(dfj_dxi, dfk_dxi); negate_m3(dfj_dxi); copy_m3_m3(dfj_dxj, dfk_dxj); negate_m3(dfj_dxj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk); copy_m3_m3(dfj_dvi, dfk_dvi); negate_m3(dfj_dvi); copy_m3_m3(dfj_dvj, dfk_dvj); negate_m3(dfj_dvj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj); add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk); add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi); add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj); add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi); /* XXX analytical calculation of derivatives below is incorrect. * This proved to be difficult, but for now just using the finite difference method for * estimating the jacobians should be sufficient. 
*/ # if 0 float edge_ij[3], dir_ij[3], grad_dir_ij[3][3]; float edge_jk[3], dir_jk[3], grad_dir_jk[3][3]; float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3]; float target[3]; float tmp[3][3]; float fi[3], fj[3], fk[3]; float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfdvi[3][3]; // TESTING damping = 0.0f; zero_v3(fi); zero_v3(fj); zero_v3(fk); zero_m3(dfi_dxi); zero_m3(dfj_dxi); zero_m3(dfk_dxi); zero_m3(dfk_dxj); zero_m3(dfk_dxk); /* jacobian of direction vectors */ spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij); spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); /* bending force */ mul_v3_v3fl(target, dir_ij, restlen); sub_v3_v3v3(dist, target, edge_jk); mul_v3_v3fl(fk, dist, stiffness); /* damping force */ madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); madd_v3_v3fl(fk, vel_jk_ortho, damping); /* XXX this only holds true as long as we assume straight rest shape! * eventually will become a bit more involved since the opposite segment * gets its own target, under condition of having equal torque on both sides. 
*/ copy_v3_v3(fi, fk); /* counterforce on the middle point */ sub_v3_v3(fj, fi); sub_v3_v3(fj, fk); /* === derivatives === */ madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen); madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen); madd_m3_m3fl(dfk_dxj, I, stiffness); madd_m3_m3fl(dfk_dxk, I, -stiffness); copy_m3_m3(dfi_dxi, dfk_dxk); negate_m3(dfi_dxi); /* dfj_dfi == dfi_dfj due to symmetry, * dfi_dfj == dfk_dfj due to fi == fk * XXX see comment above on future bent rest shapes */ copy_m3_m3(dfj_dxi, dfk_dxj); /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */ sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi); sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[i], fi); add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); # endif return true; } bool BPH_mass_spring_force_spring_goal(Implicit_Data *data, int i, const float goal_x[3], const float goal_v[3], float stiffness, float damping) { float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; /* goal is in world space */ world_to_root_v3(data, i, root_goal_x, goal_x); world_to_root_v3(data, i, root_goal_v, goal_v); sub_v3_v3v3(extent, root_goal_x, data->X[i]); sub_v3_v3v3(vel, root_goal_v, data->V[i]); length = normalize_v3_v3(dir, extent); if (length > ALMOST_ZERO) { mul_v3_v3fl(f, dir, stiffness * length); // Ascher & Boxman, p.21: Damping only during elonglation // something wrong with it... 
madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdx_spring(dfdx, dir, length, 0.0f, stiffness); dfdv_damp(dfdv, dir, damping); add_v3_v3(data->F[i], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); return true; } else { return false; } } #endif /* IMPLICIT_SOLVER_BLENDER */
GB_unaryop__ainv_uint64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint64_int64
// op(A') function:  GB_tran__ainv_uint64_int64

// C type:   uint64_t
// A type:   int64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse.  Note the operand has already been cast
// to uint64_t by GB_CASTING, so the negation is unsigned and wraps modulo
// 2^64 (well-defined behavior in C).
#define GB_OP(z, x) \
    z = -x ;

// casting: int64_t -> uint64_t conversion happens BEFORE the negation
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint64_int64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads        // # of OpenMP threads for the element-wise loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent; static schedule over anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose kernel is generated by textual inclusion below,
    // specialized by the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
boxFilter_OPSAT_SoA.h
#pragma once #include "boxFilter.hpp" //one pass box filtering SoA class boxFilter_OPSAT_SoA { protected: cv::Mat src; cv::Mat dest; int r; int parallelType; float div; int row; int col; int cn; int loop; std::vector<cv::Mat> vSrc; std::vector<cv::Mat> vDest; virtual void filter_impl(cv::Mat& input, cv::Mat& output); public: boxFilter_OPSAT_SoA(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : src(_src), dest(_dest), r(_r), parallelType(_parallelType) { div = 1.f / ((2 * r + 1)*(2 * r + 1)); row = src.rows; col = src.cols; cn = src.channels(); init(); } virtual void init() { loop = cn; vSrc.resize(loop); vDest.resize(loop); for (int i = 0; i < loop; i++) { vSrc[i].create(src.size(), CV_32FC1); vDest[i].create(src.size(), CV_32FC1); } } virtual void AoS2SoA(); virtual void SoA2AoS(); void filter() { AoS2SoA(); if (parallelType == ParallelTypes::NAIVE) { for (int i = 0; i < loop; i++) filter_impl(vSrc[i], vDest[i]); } else if (parallelType == ParallelTypes::OMP) { #pragma omp parallel for for (int i = 0; i < loop; i++) filter_impl(vSrc[i], vDest[i]); } SoA2AoS(); } void filterOnly() { if (parallelType == ParallelTypes::NAIVE) { for (int i = 0; i < loop; i++) filter_impl(vSrc[i], vDest[i]); } else if (parallelType == ParallelTypes::OMP) { #pragma omp parallel for for (int i = 0; i < loop; i++) filter_impl(vSrc[i], vDest[i]); } } }; class boxFilter_OPSAT_SoA_SSE : public boxFilter_OPSAT_SoA { private: __m128 mDiv; __m128 mBorder; void filter_impl(cv::Mat& input, cv::Mat& output) override; public: boxFilter_OPSAT_SoA_SSE(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType) : boxFilter_OPSAT_SoA(_src, _dest, _r, _parallelType) { init(); } void init() override { mDiv = _mm_set1_ps(div); mBorder = _mm_set1_ps(static_cast<float>(r + 1)); loop = cn >> 2; vSrc.resize(loop); vDest.resize(loop); for (int i = 0; i < loop; i++) { vSrc[i].create(src.size(), CV_32FC4); vDest[i].create(src.size(), CV_32FC4); } } void AoS2SoA() override; void SoA2AoS() override; }; 
// AVX variant: packs 8 channels per plane (CV_32FC(8)) and filters with __m256.
class boxFilter_OPSAT_SoA_AVX : public boxFilter_OPSAT_SoA
{
private:
	__m256 mDiv;     // broadcast of div for vectorized normalization
	__m256 mBorder;  // broadcast of (r + 1) — presumably a border weight; verify in filter_impl
	void filter_impl(cv::Mat& input, cv::Mat& output) override;
public:
	boxFilter_OPSAT_SoA_AVX(cv::Mat& _src, cv::Mat& _dest, int _r, int _parallelType)
		: boxFilter_OPSAT_SoA(_src, _dest, _r, _parallelType)
	{
		// re-run init() so the AVX override takes effect: the base constructor's
		// init() call cannot dispatch virtually to this override.
		init();
	}
	void init() override
	{
		mDiv = _mm256_set1_ps(div);
		mBorder = _mm256_set1_ps(static_cast<float>(r + 1));
		// assumes cn is a multiple of 8 — TODO confirm against callers
		loop = cn >> 3;
		vSrc.resize(loop);
		vDest.resize(loop);
		for (int i = 0; i < loop; i++)
		{
			vSrc[i].create(src.size(), CV_32FC(8));
			vDest[i].create(src.size(), CV_32FC(8));
		}
	}
	void AoS2SoA() override;
	void SoA2AoS() override;
};
Example_taskloop.1.c
/*
* @@name:	taskloop.c
* @@type:	C
* @@compilable:	yes
* @@linkable:	no
* @@expect:	success
* @@version:	omp_4.5
*/
void long_running_task(void);
void loop_body(int i, int j);

/* Overlaps one long-running task with a taskloop over 10000 iterations.
 * The enclosing taskgroup waits for BOTH the single task and all
 * taskloop-generated tasks before parallel_work returns. */
void parallel_work(void) {
   int i, j;
   #pragma omp taskgroup
   {
      #pragma omp task
      long_running_task(); // can execute concurrently

      /* grainsize(500): each generated task executes roughly 500 consecutive
       * iterations (see the OpenMP spec for the exact bounds).
       * nogroup: the taskloop does not create its own implicit taskgroup;
       * completion is still guaranteed by the enclosing taskgroup above.
       * j must be private because each task runs its own inner loop. */
      #pragma omp taskloop private(j) grainsize(500) nogroup
      for (i = 0; i < 10000; i++) { // can execute concurrently
         for (j = 0; j < i; j++) {
            loop_body(i, j);
         }
      }
   }
}
DRB069-sectionslock1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Two tasks with a lock synchronization to ensure execution order. */
/* NOTE(review): the lock provides mutual exclusion for the two increments
 * (so there is no data race and i == 3 at the end); it does not impose a
 * particular order between the two sections. */
#include "omprace.h"
#include <omp.h>
#include <omp.h> /* NOTE(review): duplicate include of <omp.h>; presumably harmless (guarded header) but could be removed */
#include <assert.h>
int main()
{
  omprace_init();
  omp_lock_t lck;
  int i=0;
  omp_init_lock(&lck);
#pragma omp parallel sections
  {
#pragma omp section
    {
      /* first writer: lock guards the read-modify-write of shared i */
      omp_set_lock(&lck);
      i += 1;
      omp_unset_lock(&lck);
    }
#pragma omp section
    {
      /* second writer: same lock, so the two updates never interleave */
      omp_set_lock(&lck);
      i += 2;
      omp_unset_lock(&lck);
    }
  }
  omp_destroy_lock(&lck);
  assert (i==3);
  omprace_fini();
  return 0;
}
needle.c
#define LIMIT -999

#define TRACE

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define DEBUG

#ifndef VERIFICATION
#define VERIFICATION 1
#endif

#ifndef ENABLE_OPENACC
#define ENABLE_OPENACC 1
#endif

#ifndef _MAX_ROWS_
#define _MAX_ROWS_ 2049
#ifdef _OPENARC_
#pragma openarc #define _MAX_ROWS_ 2049
#endif
#endif

////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);

/* Return the maximum of three ints (used by the traceback phase). */
int maximum( int a, int b, int c){
    int k;
    if( a <= b )
        k = b;
    else
        k = a;
    if( k <=c )
        return(c);
    else
        return(k);
}

/* BLOSUM62 substitution-score matrix; indexed by the two sequence symbols. */
int blosum62[24][24] = {
    { 4, -1, -2, -2,  0, -1, -1,  0, -2, -1, -1, -1, -1, -2, -1,  1,  0, -3, -2,  0, -2, -1,  0, -4},
    {-1,  5,  0, -2, -3,  1,  0, -2,  0, -3, -2,  2, -1, -3, -2, -1, -1, -3, -2, -3, -1,  0, -1, -4},
    {-2,  0,  6,  1, -3,  0,  0,  0,  1, -3, -3,  0, -2, -3, -2,  1,  0, -4, -2, -3,  3,  0, -1, -4},
    {-2, -2,  1,  6, -3,  0,  2, -1, -1, -3, -4, -1, -3, -3, -1,  0, -1, -4, -3, -3,  4,  1, -1, -4},
    { 0, -3, -3, -3,  9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
    {-1,  1,  0,  0, -3,  5,  2, -2,  0, -3, -2,  1,  0, -3, -1,  0, -1, -2, -1, -2,  0,  3, -1, -4},
    {-1,  0,  0,  2, -4,  2,  5, -2,  0, -3, -3,  1, -2, -3, -1,  0, -1, -3, -2, -2,  1,  4, -1, -4},
    { 0, -2,  0, -1, -3, -2, -2,  6, -2, -4, -4, -2, -3, -3, -2,  0, -2, -2, -3, -3, -1, -2, -1, -4},
    {-2,  0,  1, -1, -3,  0,  0, -2,  8, -3, -3, -1, -2, -1, -2, -1, -2, -2,  2, -3,  0,  0, -1, -4},
    {-1, -3, -3, -3, -1, -3, -3, -4, -3,  4,  2, -3,  1,  0, -3, -2, -1, -3, -1,  3, -3, -3, -1, -4},
    {-1, -2, -3, -4, -1, -2, -3, -4, -3,  2,  4, -2,  2,  0, -3, -2, -1, -2, -1,  1, -4, -3, -1, -4},
    {-1,  2,  0, -1, -3,  1,  1, -2, -1, -3, -2,  5, -1, -3, -1,  0, -1, -3, -2, -2,  0,  1, -1, -4},
    {-1, -1, -2, -3, -1,  0, -2, -3, -2,  1,  2, -1,  5,  0, -2, -1, -1, -1, -1,  1, -3, -1, -1, -4},
    {-2, -3, -3, -3, -2, -3, -3, -3, -1,  0,  0, -3,  0,  6, -4, -2, -2,  1,  3, -1, -3, -3, -1, -4},
    {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4,  7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
    { 1, -1,  1,  0, -1,  0,  0,  0, -1, -2, -2,  0, -1, -2, -1,  4,  1, -3, -2, -2,  0,  0,  0, -4},
    { 0, -1,  0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1,  1,  5, -2, -2,  0, -1, -1,  0, -4},
    {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1,  1, -4, -3, -2, 11,  2, -3, -4, -3, -2, -4},
    {-2, -2, -2, -3, -2, -1, -2, -3,  2, -1, -1, -2, -1,  3, -3, -2, -2,  2,  7, -1, -3, -2, -1, -4},
    { 0, -3, -3, -3, -1, -2, -2, -3, -3,  3,  1, -2,  1, -1, -2, -2,  0, -3, -1,  4, -3, -2, -1, -4},
    {-2, -1,  3,  4, -3,  0,  1, -1,  0, -3, -4,  0, -3, -3, -2,  0, -1, -4, -3, -3,  4,  1, -1, -4},
    {-1,  0,  0,  1, -3,  3,  4, -2,  0, -3, -3,  1, -1, -3, -1,  0, -1, -3, -2, -2,  1,  4, -1, -4},
    { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2,  0,  0, -2, -1, -1, -1, -1, -1, -4},
    {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,  1}
};

int max_rows, max_cols, penalty;
int omp_num_threads;

/* Wall-clock time in seconds. */
double gettime() {
    struct timeval t;
    gettimeofday(&t,0);
    return t.tv_sec+t.tv_usec*1e-6;
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    double start_time, end_time;
    start_time = gettime();
    runTest( argc, argv);
    end_time = gettime();
    printf("Total Execution Time %lf sec. \n", end_time - start_time);
    return EXIT_SUCCESS;
}

/* Print usage and exit. */
void usage(int argc, char **argv)
{
    fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> <num_threads>\n", argv[0]);
    fprintf(stderr, "\t<dimension> - x and y dimensions\n");
    fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
    fprintf(stderr, "\t<num_threads> - no. of threads\n");
    exit(1);
}

/* Accelerator (OpenACC) version of the wavefront score computation.
 * The DP matrix is processed along anti-diagonals: every cell on one
 * anti-diagonal depends only on the previous diagonals, so the inner
 * loop is fully independent. */
void mainComp(int input_itemsets[_MAX_ROWS_*_MAX_ROWS_], int referrence[_MAX_ROWS_*_MAX_ROWS_])
{
    int i, idx;

#if ENABLE_OPENACC == 1
#pragma acc data \
copy(input_itemsets[0:_MAX_ROWS_*_MAX_ROWS_]) \
copyin(referrence[0:_MAX_ROWS_*_MAX_ROWS_])
#endif
    {
        //Compute top-left matrix
        for( i = 0 ; i < max_cols-2 ; i++){
#if ENABLE_OPENACC == 1
#pragma acc kernels loop gang worker independent
#endif
            for( idx = 0 ; idx <= i ; idx++){
                /* FIX: index/a/b/c/k are now per-iteration locals so each
                 * parallel iteration owns a private copy (they were function
                 * scope before, which is unsafe in a parallel loop).
                 * The if-chain below is maximum() inlined by hand. */
                int index = (idx + 1) * max_cols + (i + 1 - idx);
                int a = input_itemsets[index-1-max_cols]+ referrence[index];
                int b = input_itemsets[index-1] - penalty;
                int c = input_itemsets[index-max_cols] - penalty;
                int k;
                if( a <= b ) k = b;
                else k = a;
                if( k <=c ) input_itemsets[index] = c;
                else input_itemsets[index] = k;
            }
        }

        printf("Processing bottom-right matrix\n");
        //Compute bottom-right matrix
        for( i = max_cols - 4 ; i >= 0 ; i--){
#if ENABLE_OPENACC == 1
#pragma acc kernels loop gang worker independent
#endif
            for( idx = 0 ; idx <= i ; idx++){
                int index = ( max_cols - idx - 2 ) * max_cols + idx + max_cols - i - 2 ;
                int a = input_itemsets[index-1-max_cols]+ referrence[index];
                int b = input_itemsets[index-1] - penalty;
                int c = input_itemsets[index-max_cols] - penalty;
                int k;
                if( a <= b ) k = b;
                else k = a;
                if( k <=c ) input_itemsets[index] = c;
                else input_itemsets[index] = k;
            }
        }
    }
}

/* CPU (OpenMP) reference version of the same wavefront computation,
 * used to verify the accelerator result. */
void mainCompCPU(int input_itemsets[_MAX_ROWS_*_MAX_ROWS_], int referrence[_MAX_ROWS_*_MAX_ROWS_])
{
    int i, idx, index;

    //Compute top-left matrix
    for( i = 0 ; i < max_cols-2 ; i++){
#ifdef _OPENMP
        //omp_set_num_threads(omp_num_threads);
#pragma omp parallel for shared(input_itemsets) firstprivate(i,max_cols,penalty) private(idx, index)
#endif
        for( idx = 0 ; idx <= i ; idx++){
            /* FIX (data race): a/b/c/k were function-scope and therefore
             * shared across OpenMP threads; declaring them inside the loop
             * body makes them private to each iteration. */
            int a, b, c, k;
            index = (idx + 1) * max_cols + (i + 1 - idx);
            a = input_itemsets[index-1-max_cols]+ referrence[index];
            b = input_itemsets[index-1] - penalty;
            c = input_itemsets[index-max_cols] - penalty;
            if( a <= b ) k = b;
            else k = a;
            if( k <=c ) input_itemsets[index] = c;
            else input_itemsets[index] = k;
        }
    }

    //Compute bottom-right matrix
    for( i = max_cols - 4 ; i >= 0 ; i--){
#ifdef _OPENMP
        //omp_set_num_threads(omp_num_threads);
#pragma omp parallel for shared(input_itemsets) firstprivate(i,max_cols,penalty) private(idx, index)
#endif
        for( idx = 0 ; idx <= i ; idx++){
            int a, b, c, k;  /* private per iteration (see note above) */
            index = ( max_cols - idx - 2 ) * max_cols + idx + max_cols - i - 2 ;
            a = input_itemsets[index-1-max_cols]+ referrence[index];
            b = input_itemsets[index-1] - penalty;
            c = input_itemsets[index-max_cols] - penalty;
            if( a <= b ) k = b;
            else k = a;
            if( k <=c ) input_itemsets[index] = c;
            else input_itemsets[index] = k;
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
//! Run the Needleman-Wunsch benchmark: set up the DP matrices, run the
//! accelerator and (optionally) CPU versions, verify, and dump a traceback.
////////////////////////////////////////////////////////////////////////////////
void runTest( int argc, char** argv)
{
    int *input_itemsets, *output_itemsets, *referrence;
    int i,j;
#ifdef DEBUG
    double start_time, end_time;
#endif
#ifdef TRACE
    FILE *fp;
#endif

    // the lengths of the two sequences should be able to divided by 16.
    // And at current stage max_rows needs to equal max_cols
    if (argc == 4)
    {
        max_rows = atoi(argv[1]);
        max_cols = atoi(argv[1]);
        penalty = atoi(argv[2]);
        omp_num_threads = atoi(argv[3]);
        if( max_rows != (_MAX_ROWS_-1) ) {
            printf("Wrong value (%d) for macro, _MAX_ROWS_!\n", _MAX_ROWS_);
            return;
        }
    }
    else{
        usage(argc, argv);
    }

    max_rows = max_rows + 1;
    max_cols = max_cols + 1;
    referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
    input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
    output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );

    /* FIX: the original only tested input_itemsets and kept running after a
     * failed allocation (guaranteed null dereference); check all three and
     * abort cleanly. */
    if (!referrence || !input_itemsets || !output_itemsets) {
        fprintf(stderr, "error: can not allocate memory\n");
        exit(1);
    }

    srand ( 7 );

    for (i = 0 ; i < max_cols; i++){
        for (j = 0 ; j < max_rows; j++){
            input_itemsets[i*max_cols+j] = 0;
        }
    }

    printf("Start Needleman-Wunsch\n");

    for( i=1; i< max_rows ; i++){    //please define your own sequence.
        input_itemsets[i*max_cols] = rand() % 10 + 1;
    }
    for( j=1; j< max_cols ; j++){    //please define your own sequence.
        input_itemsets[j] = rand() % 10 + 1;
    }

    for (i = 1 ; i < max_cols; i++){
        for (j = 1 ; j < max_rows; j++){
            referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
        }
    }

    for( i = 1; i< max_rows ; i++)
        input_itemsets[i*max_cols] = -i * penalty;
    for( j = 1; j< max_cols ; j++)
        input_itemsets[j] = -j * penalty;

    //Compute top-left matrix
    printf("Num of threads: %d\n", omp_num_threads);
    printf("Processing top-left matrix\n");
#ifdef DEBUG
    start_time = gettime();
#endif
    mainComp(input_itemsets, referrence);
#ifdef DEBUG
    end_time = gettime();
    printf("Accelerator Elapsed Time = %lf sec. \n", end_time - start_time);
#endif

    if(VERIFICATION) {
        int *input_itemsets_CPU;
        double deltaL2Norm = 0;
        double nonAccL2Norm = 0;
        double L2Norm;
        input_itemsets_CPU = (int *)malloc( max_rows * max_cols * sizeof(int) );
        if (!input_itemsets_CPU) {   /* FIX: this allocation was unchecked */
            fprintf(stderr, "error: can not allocate memory\n");
            exit(1);
        }
        /* re-seed so the CPU run gets the same pseudo-random sequences */
        srand ( 7 );
        for (i = 0 ; i < max_cols; i++){
            for (j = 0 ; j < max_rows; j++){
                input_itemsets_CPU[i*max_cols+j] = 0;
            }
        }
        for( i=1; i< max_rows ; i++){    //please define your own sequence.
            input_itemsets_CPU[i*max_cols] = rand() % 10 + 1;
        }
        for( j=1; j< max_cols ; j++){    //please define your own sequence.
            input_itemsets_CPU[j] = rand() % 10 + 1;
        }
        for( i = 1; i< max_rows ; i++)
            input_itemsets_CPU[i*max_cols] = -i * penalty;
        for( j = 1; j< max_cols ; j++)
            input_itemsets_CPU[j] = -j * penalty;
#ifdef DEBUG
        start_time = gettime();
#endif
        mainCompCPU(input_itemsets_CPU, referrence);
#ifdef DEBUG
        end_time = gettime();
        printf("Main Comp. Time CPU = %lf sec. \n", end_time - start_time);
#endif
        /* relative L2 norm of the accelerator/CPU difference */
        for (i = 0; i < max_rows * max_cols; ++i) {
            double d = input_itemsets_CPU[i] - input_itemsets[i];
            deltaL2Norm += d * d;
            /* FIX: promote to double BEFORE squaring to avoid int overflow
             * for large scores */
            nonAccL2Norm += (double)input_itemsets_CPU[i] * input_itemsets_CPU[i];
        }
        L2Norm = sqrt(deltaL2Norm / nonAccL2Norm);
        if (L2Norm < 1e-9) {
            printf("Verification: Successful\n");
        } else {
            printf("Verification: Failed\n");
        }
        printf("L2Norm = %lf\n", L2Norm);
        free(input_itemsets_CPU);
    }

#ifdef TRACE
    printf("print traceback value CPU:\n");
    if( (fp = fopen("nwTrace.txt", "w")) == 0 ) {
        printf("Can not open %s\n", "nwTrace.txt");
        free(referrence);       /* FIX: do not leak on the early-return path */
        free(input_itemsets);
        free(output_itemsets);
        return;
    }
    /* FIX: the original condition was "i>=0, j>=0" — the comma operator
     * discards the first test, so only j was checked and i could go
     * negative, after which the if-chain below fell into its empty else
     * and nw/w/n were read uninitialized (undefined behavior). */
    for (i = j = max_rows - 2; i >= 0 && j >= 0;){
        /* initialized defensively; LIMIT marks "no predecessor" */
        int nw = LIMIT, n = LIMIT, w = LIMIT, traceback;
        if ( i == max_rows - 2 && j == max_rows - 2 )
            fprintf(fp, "%d ", input_itemsets[ i * max_cols + j]); //print the first element
        if ( i == 0 && j == 0 )
            break;
        if ( i > 0 && j > 0 ){
            nw = input_itemsets[(i - 1) * max_cols + j - 1];
            w = input_itemsets[ i * max_cols + j - 1 ];
            n = input_itemsets[(i - 1) * max_cols + j];
        }
        else if ( i == 0 ){
            nw = n = LIMIT;
            w = input_itemsets[ i * max_cols + j - 1 ];
        }
        else if ( j == 0 ){
            nw = w = LIMIT;
            n = input_itemsets[(i - 1) * max_cols + j];
        }
        /* no other case is reachable now that i >= 0 && j >= 0 is enforced */
        traceback = maximum(nw, w, n);
        fprintf(fp, "%d ", traceback);
        if(traceback == nw ) {i--; j--; continue;}
        else if(traceback == w ) {j--; continue;}
        else if(traceback == n ) {i--; continue;}
        else ;
    }
    fprintf(fp, "\n");
    fclose(fp);
#endif

    /* FIX: these three buffers were never released */
    free(referrence);
    free(input_itemsets);
    free(output_itemsets);
}
base_mortar_criteria.h
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_BASE_MORTAR_CRITERIA_H) #define KRATOS_BASE_MORTAR_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "custom_utilities/contact_utilities.h" #include "utilities/mortar_utilities.h" #include "utilities/variable_utils.h" #include "utilities/normal_calculation_utils.h" #include "custom_processes/aalm_adapt_penalty_value_process.h" #include "custom_processes/compute_dynamic_factor_process.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" // DEBUG #include "includes/gid_io.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class BaseMortarConvergenceCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Custom convergence criteria for the mortar condition * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace> class BaseMortarConvergenceCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of BaseMortarConvergenceCriteria KRATOS_CLASS_POINTER_DEFINITION( BaseMortarConvergenceCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( COMPUTE_DYNAMIC_FACTOR ); KRATOS_DEFINE_LOCAL_FLAG( 
IO_DEBUG ); KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP ); /// The base class definition typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; /// The definition of the current class typedef BaseMortarConvergenceCriteria< TSparseSpace, TDenseSpace > ClassType; /// The dofs array type typedef typename BaseType::DofsArrayType DofsArrayType; /// The sparse matrix type typedef typename BaseType::TSystemMatrixType TSystemMatrixType; /// The dense vector type typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The GidIO type typedef GidIO<> GidIOBaseType; ///@} ///@name Life Cycle ///@{ /// Default constructors explicit BaseMortarConvergenceCriteria( const bool ComputeDynamicFactor = false, const bool IODebug = false, const bool PureSlip = false ) : BaseType(), mpIO(nullptr) { // Set local flags mOptions.Set(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR, ComputeDynamicFactor); mOptions.Set(BaseMortarConvergenceCriteria::IO_DEBUG, IODebug); mOptions.Set(BaseMortarConvergenceCriteria::PURE_SLIP, PureSlip); if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO = Kratos::make_shared<GidIOBaseType>("POST_LINEAR_ITER", GiD_PostBinary, SingleFile, WriteUndeformed, WriteElementsOnly); } } /** * @brief Default constructor. 
(with parameters) * @param ThisParameters The configuration parameters */ explicit BaseMortarConvergenceCriteria(Kratos::Parameters ThisParameters) : BaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } ///Copy constructor BaseMortarConvergenceCriteria( BaseMortarConvergenceCriteria const& rOther ) :BaseType(rOther), mOptions(rOther.mOptions), mpIO(rOther.mpIO) { } /// Destructor ~BaseMortarConvergenceCriteria() override = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param ThisParameters The configuration parameters */ typename BaseType::Pointer Create(Parameters ThisParameters) const override { return Kratos::make_shared<ClassType>(ThisParameters); } /** * @brief Criterias that need to be called before getting the solution * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PreCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // The contact model part ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); // We update the normals if necessary const auto normal_variation = r_process_info.Has(CONSIDER_NORMAL_VARIATION) ? 
static_cast<NormalDerivativesComputation>(r_process_info.GetValue(CONSIDER_NORMAL_VARIATION)) : NO_DERIVATIVES_COMPUTATION; if (normal_variation != NO_DERIVATIVES_COMPUTATION) { ComputeNodesMeanNormalModelPartWithPairedNormal(rModelPart); // Update normal of the conditions } // Update tangent (must be updated even for constant normal) const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false; if (frictional_problem) { const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER); if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part); } else { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true); } } const bool adapt_penalty = r_process_info.Has(ADAPT_PENALTY) ? r_process_info.GetValue(ADAPT_PENALTY) : false; const bool dynamic_case = rModelPart.HasNodalSolutionStepVariable(VELOCITY); /* Compute weighthed gap */ if (adapt_penalty || dynamic_case) { // Set to zero the weighted gap ResetWeightedGap(rModelPart); // Compute the contribution ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact")); } // In dynamic case if ( dynamic_case && mOptions.Is(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR)) { ComputeDynamicFactorProcess compute_dynamic_factor_process( r_contact_model_part ); compute_dynamic_factor_process.Execute(); } // We recalculate the penalty parameter if ( adapt_penalty ) { AALMAdaptPenaltyValueProcess aalm_adaptation_of_penalty( r_contact_model_part ); aalm_adaptation_of_penalty.Execute(); } return true; } /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // We save the current WEIGHTED_GAP in the buffer auto& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes(); const auto it_node_begin = r_nodes_array.begin(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) { auto it_node = it_node_begin + i; it_node->FastGetSolutionStepValue(WEIGHTED_GAP, 1) = it_node->FastGetSolutionStepValue(WEIGHTED_GAP); } // Set to zero the weighted gap ResetWeightedGap(rModelPart); // Compute the contribution ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact")); // GiD IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { const bool frictional_problem = rModelPart.IsDefined(SLIP) ? 
rModelPart.Is(SLIP) : false; const int nl_iter = rModelPart.GetProcessInfo()[NL_ITERATION_NUMBER]; const double label = static_cast<double>(nl_iter); if (nl_iter == 1) { mpIO->InitializeMesh(label); mpIO->WriteMesh(rModelPart.GetMesh()); mpIO->FinalizeMesh(); mpIO->InitializeResults(label, rModelPart.GetMesh()); } mpIO->WriteNodalFlags(INTERFACE, "INTERFACE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(ACTIVE, "ACTIVE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(SLAVE, "SLAVE", rModelPart.Nodes(), label); mpIO->WriteNodalFlags(ISOLATED, "ISOLATED", rModelPart.Nodes(), label); mpIO->WriteNodalResults(NORMAL, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResultsNonHistorical(DYNAMIC_FACTOR, rModelPart.Nodes(), label); mpIO->WriteNodalResultsNonHistorical(AUGMENTED_NORMAL_CONTACT_PRESSURE, rModelPart.Nodes(), label); mpIO->WriteNodalResults(DISPLACEMENT, rModelPart.Nodes(), label, 0); if (rModelPart.Nodes().begin()->SolutionStepsDataHas(VELOCITY_X)) { mpIO->WriteNodalResults(VELOCITY, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResults(ACCELERATION, rModelPart.Nodes(), label, 0); } if (r_nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) mpIO->WriteNodalResults(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE, rModelPart.Nodes(), label, 0); else if (r_nodes_array.begin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X)) mpIO->WriteNodalResults(VECTOR_LAGRANGE_MULTIPLIER, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResults(WEIGHTED_GAP, rModelPart.Nodes(), label, 0); if (frictional_problem) { mpIO->WriteNodalFlags(SLIP, "SLIP", rModelPart.Nodes(), label); mpIO->WriteNodalResults(WEIGHTED_SLIP, rModelPart.Nodes(), label, 0); mpIO->WriteNodalResultsNonHistorical(AUGMENTED_TANGENT_CONTACT_PRESSURE, rModelPart.Nodes(), label); } } return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart The model part of interest */ void Initialize(ModelPart& rModelPart) override { // Calling base criteria 
BaseType::Initialize(rModelPart); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Update normal of the conditions ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); NormalCalculationUtils().CalculateUnitNormals<Condition>(r_contact_model_part, true); const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false; if (frictional_problem) { const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER); if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part); } else { MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true); } } // IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO->CloseResultFile(); std::ostringstream new_name ; new_name << "POST_LINEAR_ITER_STEP=""POST_LINEAR_ITER_STEP=" << rModelPart.GetProcessInfo()[STEP]; mpIO->ChangeOutputName(new_name.str()); } } /** * @brief This function finalizes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void FinalizeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // IO for debugging if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO->FinalizeResults(); } } /** * @brief This function finalizes the non-linear iteration * @param rModelPart Reference to the ModelPart containing the problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) */ void FinalizeNonLinearIteration( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Calling base criteria BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "base_mortar_criteria", "compute_dynamic_factor" : false, "gidio_debug" : false, "pure_slip" : false })" ); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings 
(snake_case format) * @return The name of the class */ static std::string Name() { return "base_mortar_criteria"; } ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "BaseMortarConvergenceCriteria"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Flags mOptions; /// Local flags ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // Set local flags mOptions.Set(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR, ThisParameters["compute_dynamic_factor"].GetBool()); mOptions.Set(BaseMortarConvergenceCriteria::IO_DEBUG, ThisParameters["gidio_debug"].GetBool()); mOptions.Set(BaseMortarConvergenceCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool()); if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) { mpIO = Kratos::make_shared<GidIOBaseType>("POST_LINEAR_ITER", GiD_PostBinary, SingleFile, WriteUndeformed, WriteElementsOnly); } } /** * @brief This method resets the weighted gap in the nodes of the problem * @param rModelPart Reference to the ModelPart containing the contact problem. 
*/ virtual void ResetWeightedGap(ModelPart& rModelPart) { auto& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes(); VariableUtils().SetVariable(WEIGHTED_GAP, 0.0, r_nodes_array); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ GidIOBaseType::Pointer mpIO; /// The pointer to the debugging IO ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief It computes the mean of the normal in the condition in all the nodes * @param rModelPart The model part to compute */ inline void ComputeNodesMeanNormalModelPartWithPairedNormal(ModelPart& rModelPart) { // Compute normal and tangent ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact"); NormalCalculationUtils().CalculateUnitNormals<Condition>(r_contact_model_part, true); // Iterate over the computing conditions ModelPart& r_computing_contact_model_part = rModelPart.GetSubModelPart("ComputingContact"); auto& r_conditions_array = r_computing_contact_model_part.Conditions(); const auto it_cond_begin = r_conditions_array.begin(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) { auto it_cond = it_cond_begin + i; // Aux coordinates Point::CoordinatesArrayType aux_coords; // We update the paired normal GeometryType& r_parent_geometry = it_cond->GetGeometry().GetGeometryPart(0); aux_coords = r_parent_geometry.PointLocalCoordinates(aux_coords, r_parent_geometry.Center()); it_cond->SetValue(NORMAL, r_parent_geometry.UnitNormal(aux_coords)); } } ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Class BaseMortarConvergenceCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags 
BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::COMPUTE_DYNAMIC_FACTOR(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::IO_DEBUG(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(2)); } // namespace Kratos #endif /* KRATOS_BASE_MORTAR_CRITERIA_H defined */
matfilter.c
#include "matrix.h" /** \brief Computes 2-D convolution * * \param[in] A Input matrix * \param[in] mask Input kernel/mask * \param[in] scratch Scratch matrix for temporary calculations * \param[in] result Matrix to store the result * \return Convolved output matrix * */ MATRIX mat_conv2(MATRIX A, MATRIX mask, MATRIX scratch, MATRIX result) { int i, j, k, l, m, n, o, p, ii, jj, mm, nn, flag = 0; m = MatCol(A); n = MatRow(A); o = MatCol(mask); p = MatRow(mask); if((o%2)!=1 ||(p%2)!=1) gen_error(GEN_SIZE_ERROR); ii = (p-1)/2; jj = (o-1)/2; mm = jj+jj+m; nn = ii+ii+n; l = jj+m; k = ii+n; if(scratch==NULL) { if((scratch = mat_creat(nn, mm, UNDEFINED))==NULL) mat_error(MAT_MALLOC); flag = 1; } #pragma omp parallel for private(j) firstprivate(mm, ii, jj, k, l) for(i=0; i<nn; ++i) { for(j=0; j<mm; ++j) { if(i<ii || j<jj || i>=k || j>=l ) scratch[i][j] = 0.0; else scratch[i][j] = A[i-ii][j-jj]; } } if(result==NULL) if((result = mat_creat(n, m, UNDEFINED))==NULL) mat_error(MAT_MALLOC); #pragma omp parallel for private(j) firstprivate(m, ii, jj, k, l) for(i=0; i<n; ++i) { for(j=0; j<m; ++j) { mtype acc = 0.0; for(k = -ii; k<=ii; ++k) { for(l = -jj; l<=jj; ++l) { acc += scratch[i+ii+k][j+jj+l]*mask[ii-k][jj-l]; } } result[i][j] = acc; } } if(flag==1) mat_free(scratch); return result; }
map-4.c
/* PR c/96678. */ #define SIZE (100) typedef double Grid[SIZE]; void test (Grid src1) { #pragma omp target map(alloc:src1[:]) /* { dg-error "for array function parameter length expression must be specified" } */ { src1[0] = 5; } } void test2 (double src2[]) { #pragma omp target map(alloc:src2[:]) /* { dg-error "for array function parameter length expression must be specified" } */ { src2[0] = 5; } } void test3 (double *src3) { #pragma omp target map(alloc:src3[:]) /* { dg-error "for pointer type length expression must be specified" } */ { src3[0] = 5; } }
HybridRepSetReader.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2019 QMCPACK developers. // // File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory ////////////////////////////////////////////////////////////////////////////////////// /** @file * * derived from SplineSetReader */ #ifndef QMCPLUSPLUS_HYBRIDREP_READER_H #define QMCPLUSPLUS_HYBRIDREP_READER_H #include <Numerics/Quadrature.h> #include <Numerics/Bessel.h> #include <QMCWaveFunctions/BsplineFactory/HybridRepCenterOrbitals.h> #include "OhmmsData/AttributeSet.h" //#include <QMCHamiltonians/Ylm.h> //#define PRINT_RADIAL namespace qmcplusplus { template<typename ST, typename LT> struct Gvectors { typedef TinyVector<ST, 3> PosType; typedef std::complex<ST> ValueType; const LT& Lattice; std::vector<PosType> gvecs_cart; //Cartesian. 
std::vector<ST> gmag; const size_t NumGvecs; Gvectors(const std::vector<TinyVector<int, 3>>& gvecs_in, const LT& Lattice_in, const TinyVector<int, 3>& HalfG, size_t first, size_t last) : Lattice(Lattice_in), NumGvecs(last - first) { gvecs_cart.resize(NumGvecs); gmag.resize(NumGvecs); #pragma omp parallel for for (size_t ig = 0; ig < NumGvecs; ig++) { TinyVector<ST, 3> gvec_shift; gvec_shift = gvecs_in[ig + first] + HalfG * 0.5; gvecs_cart[ig] = Lattice.k_cart(gvec_shift); gmag[ig] = std::sqrt(dot(gvecs_cart[ig], gvecs_cart[ig])); } } template<typename YLM_ENGINE, typename VVT> void calc_Ylm_G(const size_t ig, YLM_ENGINE& Ylm, VVT& YlmG) const { PosType Ghat(0.0, 0.0, 1.0); if (gmag[ig] > 0) Ghat = gvecs_cart[ig] / gmag[ig]; Ylm.evaluateV(Ghat[0], Ghat[1], Ghat[2], YlmG.data()); } template<typename VVT> inline void calc_jlm_G(const int lmax, ST& r, const size_t ig, VVT& j_lm_G) const { bessel_steed_array_cpu(lmax, gmag[ig] * r, j_lm_G.data()); for (size_t l = lmax; l > 0; l--) for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++) j_lm_G[lm] = j_lm_G[l]; } template<typename PT, typename VT> inline void calc_phase_shift(const PT& RSoA, const size_t ig, VT& phase_shift_real, VT& phase_shift_imag) const { const ST* restrict px = RSoA.data(0); const ST* restrict py = RSoA.data(1); const ST* restrict pz = RSoA.data(2); ST* restrict v_r = phase_shift_real.data(); ST* restrict v_i = phase_shift_imag.data(); const ST& gv_x = gvecs_cart[ig][0]; const ST& gv_y = gvecs_cart[ig][1]; const ST& gv_z = gvecs_cart[ig][2]; #pragma omp simd aligned(px, py, pz, v_r, v_i) for (size_t iat = 0; iat < RSoA.size(); iat++) sincos(px[iat] * gv_x + py[iat] * gv_y + pz[iat] * gv_z, v_i + iat, v_r + iat); } template<typename PT> ValueType evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos) { assert(cG.size() == NumGvecs); std::complex<ST> val(0.0, 0.0); for (size_t ig = 0; ig < NumGvecs; ig++) { ST s, c; sincos(dot(gvecs_cart[ig], pos), &s, &c); ValueType pw0(c, s); val += 
cG[ig] * pw0; } return val; } template<typename PT> void evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos, ValueType& phi, ValueType& d2phi) { assert(cG.size() == NumGvecs); d2phi = phi = 0.0; for (size_t ig = 0; ig < NumGvecs; ig++) { ST s, c; sincos(dot(gvecs_cart[ig], pos), &s, &c); ValueType pw0(c, s); phi += cG[ig] * pw0; d2phi += cG[ig] * pw0 * (-dot(gvecs_cart[ig], gvecs_cart[ig])); } } double evaluate_KE(const Vector<std::complex<double>>& cG) { assert(cG.size() == NumGvecs); double KE = 0; for (size_t ig = 0; ig < NumGvecs; ig++) KE += dot(gvecs_cart[ig], gvecs_cart[ig]) * (cG[ig].real() * cG[ig].real() + cG[ig].imag() * cG[ig].imag()); return KE / 2.0; } }; /** General HybridRepSetReader to handle any unitcell */ template<typename SA> struct HybridRepSetReader : public SplineSetReader<SA> { typedef SplineSetReader<SA> BaseReader; using BaseReader::bspline; using BaseReader::mybuilder; using BaseReader::rotate_phase_i; using BaseReader::rotate_phase_r; using typename BaseReader::DataType; HybridRepSetReader(EinsplineSetBuilder* e) : BaseReader(e) {} /** initialize basic parameters of atomic orbitals */ void initialize_hybridrep_atomic_centers() override { OhmmsAttributeSet a; std::string scheme_name("Consistent"); std::string s_function_name("LEKS2018"); a.add(scheme_name, "smoothing_scheme"); a.add(s_function_name, "smoothing_function"); a.put(mybuilder->XMLRoot); // assign smooth_scheme if (scheme_name == "Consistent") bspline->smooth_scheme = SA::smoothing_schemes::CONSISTENT; else if (scheme_name == "SmoothAll") bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHALL; else if (scheme_name == "SmoothPartial") bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHPARTIAL; else APP_ABORT("initialize_hybridrep_atomic_centers wrong smoothing_scheme name! 
Only allows Consistent, SmoothAll or " "SmoothPartial."); // assign smooth_function if (s_function_name == "LEKS2018") bspline->smooth_func_id = smoothing_functions::LEKS2018; else if (s_function_name == "coscos") bspline->smooth_func_id = smoothing_functions::COSCOS; else if (s_function_name == "linear") bspline->smooth_func_id = smoothing_functions::LINEAR; else APP_ABORT( "initialize_hybridrep_atomic_centers wrong smoothing_function name! Only allows LEKS2018, coscos or linear."); app_log() << "Hybrid orbital representation uses " << scheme_name << " smoothing scheme and " << s_function_name << " smoothing function." << std::endl; bspline->set_info(*(mybuilder->SourcePtcl), mybuilder->TargetPtcl, mybuilder->Super2Prim); auto& centers = bspline->AtomicCenters; auto& ACInfo = mybuilder->AtomicCentersInfo; // load atomic center info only when it is not initialized if (centers.size() == 0) { bool success = true; app_log() << "Reading atomic center info for hybrid representation" << std::endl; for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++) { const int my_GroupID = ACInfo.GroupID[center_idx]; if (ACInfo.cutoff[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'cutoff_radius' for atom " << center_idx << std::endl; success = false; } if (ACInfo.inner_cutoff[center_idx] < 0) { const double inner_cutoff = std::max(ACInfo.cutoff[center_idx] - 0.3, 0.0); app_log() << "Hybrid orbital representation setting 'inner_cutoff' to " << inner_cutoff << " for group " << my_GroupID << " as atom " << center_idx << std::endl; // overwrite the inner_cutoff of all the atoms of the same species for (int id = 0; id < ACInfo.Ncenters; id++) if (my_GroupID == ACInfo.GroupID[id]) ACInfo.inner_cutoff[id] = inner_cutoff; } else if (ACInfo.inner_cutoff[center_idx] > ACInfo.cutoff[center_idx]) { app_error() << "Hybrid orbital representation 'inner_cutoff' must be smaller than 'spline_radius' for atom " << center_idx << std::endl; success = 
false; } if (ACInfo.cutoff[center_idx] > 0) { if (ACInfo.lmax[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'lmax' for atom " << center_idx << std::endl; success = false; } if (ACInfo.spline_radius[center_idx] < 0 && ACInfo.spline_npoints[center_idx] < 0) { app_log() << "Parameters 'spline_radius' and 'spline_npoints' for group " << my_GroupID << " as atom " << center_idx << " are not specified." << std::endl; const double delta = std::min(0.02, ACInfo.cutoff[center_idx] / 4.0); const int n_grid_point = std::ceil((ACInfo.cutoff[center_idx] + 1e-4) / delta) + 3; for (int id = 0; id < ACInfo.Ncenters; id++) if (my_GroupID == ACInfo.GroupID[id]) { ACInfo.spline_npoints[id] = n_grid_point; ACInfo.spline_radius[id] = (n_grid_point - 1) * delta; } app_log() << " Based on default grid point distance " << delta << std::endl; app_log() << " Setting 'spline_npoints' to " << ACInfo.spline_npoints[center_idx] << std::endl; app_log() << " Setting 'spline_radius' to " << ACInfo.spline_radius[center_idx] << std::endl; } else { if (ACInfo.spline_radius[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'spline_radius' for atom " << center_idx << std::endl; success = false; } if (ACInfo.spline_npoints[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'spline_npoints' for atom " << center_idx << std::endl; success = false; } } // check maximally allowed cutoff_radius double max_allowed_cutoff = ACInfo.spline_radius[center_idx] - 2.0 * ACInfo.spline_radius[center_idx] / (ACInfo.spline_npoints[center_idx] - 1); if (success && ACInfo.cutoff[center_idx] > max_allowed_cutoff) { app_error() << "Hybrid orbital representation requires cutoff_radius<=" << max_allowed_cutoff << " calculated by spline_radius-2*spline_radius/(spline_npoints-1) for atom " << center_idx << std::endl; success = false; } } else { // no atomic regions for this atom type ACInfo.spline_radius[center_idx] = 0.0; 
ACInfo.spline_npoints[center_idx] = 0; ACInfo.lmax[center_idx] = 0; } } if (!success) BaseReader::myComm->barrier_and_abort("initialize_hybridrep_atomic_centers Failed to initialize atomic centers " "in hybrid orbital representation!"); for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++) { AtomicOrbitals<DataType> oneCenter(ACInfo.lmax[center_idx]); oneCenter.set_info(ACInfo.ion_pos[center_idx], ACInfo.cutoff[center_idx], ACInfo.inner_cutoff[center_idx], ACInfo.spline_radius[center_idx], ACInfo.non_overlapping_radius[center_idx], ACInfo.spline_npoints[center_idx]); centers.push_back(oneCenter); } } } /** initialize construct atomic orbital radial functions from plane waves */ inline void create_atomic_centers_Gspace(Vector<std::complex<double>>& cG, Communicate& band_group_comm, int iorb) override { band_group_comm.bcast(rotate_phase_r); band_group_comm.bcast(rotate_phase_i); band_group_comm.bcast(cG); //distribute G-vectors over processor groups const int Ngvecs = mybuilder->Gvecs[0].size(); const int Nprocs = band_group_comm.size(); const int Ngvecgroups = std::min(Ngvecs, Nprocs); Communicate gvec_group_comm(band_group_comm, Ngvecgroups); std::vector<int> gvec_groups(Ngvecgroups + 1, 0); FairDivideLow(Ngvecs, Ngvecgroups, gvec_groups); const int gvec_first = gvec_groups[gvec_group_comm.getGroupID()]; const int gvec_last = gvec_groups[gvec_group_comm.getGroupID() + 1]; // prepare Gvecs Ylm(G) typedef typename EinsplineSetBuilder::UnitCellType UnitCellType; Gvectors<double, UnitCellType> Gvecs(mybuilder->Gvecs[0], mybuilder->PrimCell, bspline->HalfG, gvec_first, gvec_last); // if(band_group_comm.isGroupLeader()) std::cout << "print band=" << iorb << " KE=" << Gvecs.evaluate_KE(cG) << std::endl; std::vector<AtomicOrbitals<DataType>>& centers = bspline->AtomicCenters; app_log() << "Transforming band " << iorb << " on Rank 0" << std::endl; // collect atomic centers by group std::vector<int> uniq_species; for (int center_idx = 0; center_idx < 
centers.size(); center_idx++) { auto& ACInfo = mybuilder->AtomicCentersInfo; const int my_GroupID = ACInfo.GroupID[center_idx]; int found_idx = -1; for (size_t idx = 0; idx < uniq_species.size(); idx++) if (my_GroupID == uniq_species[idx]) { found_idx = idx; break; } if (found_idx < 0) uniq_species.push_back(my_GroupID); } // construct group list std::vector<std::vector<int>> group_list(uniq_species.size()); for (int center_idx = 0; center_idx < centers.size(); center_idx++) { auto& ACInfo = mybuilder->AtomicCentersInfo; const int my_GroupID = ACInfo.GroupID[center_idx]; for (size_t idx = 0; idx < uniq_species.size(); idx++) if (my_GroupID == uniq_species[idx]) { group_list[idx].push_back(center_idx); break; } } for (int group_idx = 0; group_idx < group_list.size(); group_idx++) { const auto& mygroup = group_list[group_idx]; const double spline_radius = centers[mygroup[0]].getSplineRadius(); const int spline_npoints = centers[mygroup[0]].getSplineNpoints(); const int lmax = centers[mygroup[0]].getLmax(); const double delta = spline_radius / static_cast<double>(spline_npoints - 1); const int lm_tot = (lmax + 1) * (lmax + 1); const size_t natoms = mygroup.size(); const int policy = lm_tot > natoms ? 0 : 1; std::vector<std::complex<double>> i_power(lm_tot); // rotate phase is introduced here. 
std::complex<double> i_temp(rotate_phase_r, rotate_phase_i); for (size_t l = 0; l <= lmax; l++) { for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++) i_power[lm] = i_temp; i_temp *= std::complex<double>(0.0, 1.0); } std::vector<Matrix<double>> all_vals(natoms); std::vector<std::vector<aligned_vector<double>>> vals_local(spline_npoints * omp_get_max_threads()); VectorSoaContainer<double, 3> myRSoA(natoms); for (size_t idx = 0; idx < natoms; idx++) { all_vals[idx].resize(spline_npoints, lm_tot * 2); all_vals[idx] = 0.0; myRSoA(idx) = centers[mygroup[idx]].getCenterPos(); } #pragma omp parallel { const size_t tid = omp_get_thread_num(); const size_t nt = omp_get_num_threads(); for (int ip = 0; ip < spline_npoints; ip++) { const size_t ip_idx = tid * spline_npoints + ip; if (policy == 1) { vals_local[ip_idx].resize(lm_tot * 2); for (size_t lm = 0; lm < lm_tot * 2; lm++) { auto& vals = vals_local[ip_idx][lm]; vals.resize(natoms); std::fill(vals.begin(), vals.end(), 0.0); } } else { vals_local[ip_idx].resize(natoms * 2); for (size_t iat = 0; iat < natoms * 2; iat++) { auto& vals = vals_local[ip_idx][iat]; vals.resize(lm_tot); std::fill(vals.begin(), vals.end(), 0.0); } } } const size_t size_pw_tile = 32; const size_t num_pw_tiles = (Gvecs.NumGvecs + size_pw_tile - 1) / size_pw_tile; aligned_vector<double> j_lm_G(lm_tot, 0.0); std::vector<aligned_vector<double>> phase_shift_r(size_pw_tile); std::vector<aligned_vector<double>> phase_shift_i(size_pw_tile); std::vector<aligned_vector<double>> YlmG(size_pw_tile); for (size_t ig = 0; ig < size_pw_tile; ig++) { phase_shift_r[ig].resize(natoms); phase_shift_i[ig].resize(natoms); YlmG[ig].resize(lm_tot); } SoaSphericalTensor<double> Ylm(lmax); #pragma omp for for (size_t tile_id = 0; tile_id < num_pw_tiles; tile_id++) { const size_t ig_first = tile_id * size_pw_tile; const size_t ig_last = std::min((tile_id + 1) * size_pw_tile, Gvecs.NumGvecs); for (size_t ig = ig_first; ig < ig_last; ig++) { const size_t ig_local = ig - 
ig_first; // calculate phase shift for all the centers of this group Gvecs.calc_phase_shift(myRSoA, ig, phase_shift_r[ig_local], phase_shift_i[ig_local]); Gvecs.calc_Ylm_G(ig, Ylm, YlmG[ig_local]); } for (int ip = 0; ip < spline_npoints; ip++) { double r = delta * static_cast<double>(ip); const size_t ip_idx = tid * spline_npoints + ip; for (size_t ig = ig_first; ig < ig_last; ig++) { const size_t ig_local = ig - ig_first; // calculate spherical bessel function Gvecs.calc_jlm_G(lmax, r, ig, j_lm_G); for (size_t lm = 0; lm < lm_tot; lm++) j_lm_G[lm] *= YlmG[ig_local][lm]; const double cG_r = cG[ig + gvec_first].real(); const double cG_i = cG[ig + gvec_first].imag(); if (policy == 1) { for (size_t lm = 0; lm < lm_tot; lm++) { double* restrict vals_r = vals_local[ip_idx][lm * 2].data(); double* restrict vals_i = vals_local[ip_idx][lm * 2 + 1].data(); const double* restrict ps_r_ptr = phase_shift_r[ig_local].data(); const double* restrict ps_i_ptr = phase_shift_i[ig_local].data(); double cG_j_r = cG_r * j_lm_G[lm]; double cG_j_i = cG_i * j_lm_G[lm]; #pragma omp simd aligned(vals_r, vals_i, ps_r_ptr, ps_i_ptr) for (size_t idx = 0; idx < natoms; idx++) { const double ps_r = ps_r_ptr[idx]; const double ps_i = ps_i_ptr[idx]; vals_r[idx] += cG_j_r * ps_r - cG_j_i * ps_i; vals_i[idx] += cG_j_i * ps_r + cG_j_r * ps_i; } } } else { for (size_t idx = 0; idx < natoms; idx++) { double* restrict vals_r = vals_local[ip_idx][idx * 2].data(); double* restrict vals_i = vals_local[ip_idx][idx * 2 + 1].data(); const double* restrict j_lm_G_ptr = j_lm_G.data(); double cG_ps_r = cG_r * phase_shift_r[ig_local][idx] - cG_i * phase_shift_i[ig_local][idx]; double cG_ps_i = cG_i * phase_shift_r[ig_local][idx] + cG_r * phase_shift_i[ig_local][idx]; #pragma omp simd aligned(vals_r, vals_i, j_lm_G_ptr) for (size_t lm = 0; lm < lm_tot; lm++) { const double jlm = j_lm_G_ptr[lm]; vals_r[lm] += cG_ps_r * jlm; vals_i[lm] += cG_ps_i * jlm; } } } } } } #pragma omp for collapse(2) for (int ip = 0; ip < 
spline_npoints; ip++) for (size_t idx = 0; idx < natoms; idx++) { double* vals = all_vals[idx][ip]; for (size_t tid = 0; tid < nt; tid++) for (size_t lm = 0; lm < lm_tot; lm++) { double vals_th_r, vals_th_i; const size_t ip_idx = tid * spline_npoints + ip; if (policy == 1) { vals_th_r = vals_local[ip_idx][lm * 2][idx]; vals_th_i = vals_local[ip_idx][lm * 2 + 1][idx]; } else { vals_th_r = vals_local[ip_idx][idx * 2][lm]; vals_th_i = vals_local[ip_idx][idx * 2 + 1][lm]; } const double real_tmp = 4.0 * M_PI * i_power[lm].real(); const double imag_tmp = 4.0 * M_PI * i_power[lm].imag(); vals[lm] += vals_th_r * real_tmp - vals_th_i * imag_tmp; vals[lm + lm_tot] += vals_th_i * real_tmp + vals_th_r * imag_tmp; } } } //app_log() << "Building band " << iorb << " at center " << center_idx << std::endl; for (size_t idx = 0; idx < natoms; idx++) { // reduce all_vals band_group_comm.reduce_in_place(all_vals[idx].data(), all_vals[idx].size()); if (!band_group_comm.isGroupLeader()) continue; #pragma omp parallel for for (int lm = 0; lm < lm_tot; lm++) { auto& mycenter = centers[mygroup[idx]]; aligned_vector<double> splineData_r(spline_npoints); UBspline_1d_d* atomic_spline_r; for (size_t ip = 0; ip < spline_npoints; ip++) splineData_r[ip] = all_vals[idx][ip][lm]; atomic_spline_r = einspline::create(atomic_spline_r, 0.0, spline_radius, spline_npoints, splineData_r.data(), ((lm == 0) || (lm > 3))); if (!bspline->is_complex) { mycenter.set_spline(atomic_spline_r, lm, iorb); einspline::destroy(atomic_spline_r); } else { aligned_vector<double> splineData_i(spline_npoints); UBspline_1d_d* atomic_spline_i; for (size_t ip = 0; ip < spline_npoints; ip++) splineData_i[ip] = all_vals[idx][ip][lm + lm_tot]; atomic_spline_i = einspline::create(atomic_spline_i, 0.0, spline_radius, spline_npoints, splineData_i.data(), ((lm == 0) || (lm > 3))); mycenter.set_spline(atomic_spline_r, lm, iorb * 2); mycenter.set_spline(atomic_spline_i, lm, iorb * 2 + 1); einspline::destroy(atomic_spline_r); 
einspline::destroy(atomic_spline_i); } } } #ifdef PRINT_RADIAL char fname[64]; sprintf(fname, "band_%d_center_%d_pw.dat", iorb, center_idx); FILE* fout_pw = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_v.dat", iorb, center_idx); FILE* fout_spline_v = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_g.dat", iorb, center_idx); FILE* fout_spline_g = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_l.dat", iorb, center_idx); FILE* fout_spline_l = fopen(fname, "w"); fprintf(fout_pw, "# r vals(lm)\n"); fprintf(fout_spline_v, "# r vals(lm)\n"); fprintf(fout_spline_g, "# r grads(lm)\n"); fprintf(fout_spline_l, "# r lapls(lm)\n"); // write to file for plotting for (int ip = 0; ip < spline_npoints - 1; ip++) { double r = delta * static_cast<double>(ip); mycenter.SplineInst->evaluate_vgl(r, mycenter.localV, mycenter.localG, mycenter.localL); fprintf(fout_pw, "%15.10lf ", r); fprintf(fout_spline_v, "%15.10lf ", r); fprintf(fout_spline_g, "%15.10lf ", r); fprintf(fout_spline_l, "%15.10lf ", r); for (int lm = 0; lm < lm_tot; lm++) { fprintf(fout_pw, "%15.10lf %15.10lf ", all_vals[center_idx][ip][lm].real(), all_vals[center_idx][ip][lm].imag()); fprintf(fout_spline_v, "%15.10lf %15.10lf ", mycenter.localV[lm * mycenter.Npad + iorb * 2], mycenter.localV[lm * mycenter.Npad + iorb * 2 + 1]); fprintf(fout_spline_g, "%15.10lf %15.10lf ", mycenter.localG[lm * mycenter.Npad + iorb * 2], mycenter.localG[lm * mycenter.Npad + iorb * 2 + 1]); fprintf(fout_spline_l, "%15.10lf %15.10lf ", mycenter.localL[lm * mycenter.Npad + iorb * 2], mycenter.localL[lm * mycenter.Npad + iorb * 2 + 1]); } fprintf(fout_pw, "\n"); fprintf(fout_spline_v, "\n"); fprintf(fout_spline_g, "\n"); fprintf(fout_spline_l, "\n"); } fclose(fout_pw); fclose(fout_spline_v); fclose(fout_spline_g); fclose(fout_spline_l); #endif } } }; } // namespace qmcplusplus #endif
pad.h
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MACE_KERNELS_PAD_H_
#define MACE_KERNELS_PAD_H_

#include <algorithm>
#include <memory>
#include <vector>

// NOTE(review): memcpy is used below but <cstring> is not included directly;
// it is presumably pulled in transitively by one of these headers -- confirm.
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/kernels/kernel.h"

namespace mace {
namespace kernels {

// Constant-value padding kernel (CPU reference implementation).
// paddings_ holds 2 entries (before, after) per input dimension, in dimension
// order; constant_value_ fills the padded border.
template<DeviceType D, typename T>
struct PadFunctor : OpKernel {
  PadFunctor(OpKernelContext *context,
             const std::vector<int> &paddings,
             const float constant_value)
      : OpKernel(context),
        paddings_(paddings),
        constant_value_(constant_value) {}

  // Pads a 4-D input into output. The indexing below treats the dims as
  // (dim0, dim1, dim2, dim3) with paddings_[2*d] = "before" amount of dim d.
  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    // Exactly two padding values (before/after) per input dimension.
    MACE_CHECK(
        this->paddings_.size() == static_cast<size_t>(input->dim_size()) * 2);
    auto input_shape = input->shape();
    MACE_RETURN_IF_ERROR(output->Resize({input_shape[0] + this->paddings_[0]
                                             + this->paddings_[1],
                                         input_shape[1] + this->paddings_[2]
                                             + this->paddings_[3],
                                         input_shape[2] + this->paddings_[4]
                                             + this->paddings_[5],
                                         input_shape[3] + this->paddings_[6]
                                             + this->paddings_[7]}));

    Tensor::MappingGuard input_guard(input);
    Tensor::MappingGuard output_guard(output);
    auto input_ptr = input->data<T>();
    T *output_ptr = output->mutable_data<T>();

    // Fill the whole output first; the copy loop below then overwrites the
    // interior region, leaving constant_value_ only in the padded border.
    std::fill(output_ptr, output_ptr + output->size(), this->constant_value_);

    const index_t batch = input->dim(0);
    const index_t channel = input->dim(1);
    const index_t height = input->dim(2);
    const index_t width = input->dim(3);
    // Each (b, c, h) row of the input is contiguous, so one memcpy moves a
    // full row; iterations touch disjoint output rows, so the collapse(3)
    // parallel loop is race-free.
#pragma omp parallel for collapse(3)
    for (index_t b = 0; b < batch; ++b) {
      for (index_t c = 0; c < channel; ++c) {
        for (index_t h = 0; h < height; ++h) {
          const index_t in_offset = (((b * channel + c) * height) + h) * width;
          // Shift each coordinate by its "before" padding amount
          // (paddings_[0], [2], [4], [6]) to find the interior destination.
          const index_t out_offset = (((b + this->paddings_[0])
              * output->dim(1) + (c + this->paddings_[2])) * output->dim(2)
              + (h + this->paddings_[4])) * output->dim(3)
              + this->paddings_[6];
          memcpy(output_ptr + out_offset,
                 input_ptr + in_offset,
                 width * sizeof(T));
        }
      }
    }

    return MACE_SUCCESS;
  }

  std::vector<int> paddings_;   // (before, after) pairs, one per dimension
  float constant_value_;        // fill value for the padded border
};

#ifdef MACE_ENABLE_OPENCL
// Abstract interface implemented by the OpenCL pad kernel.
class OpenCLPadKernel {
 public:
  virtual MaceStatus Compute(
      OpKernelContext *context,
      const Tensor *input,
      Tensor *output,
      StatsFuture *future) = 0;
  MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLPadKernel);
};

// GPU specialization; definitions live in the corresponding .cc file.
template <typename T>
struct PadFunctor<DeviceType::GPU, T> : OpKernel {
  PadFunctor(OpKernelContext *context,
             const std::vector<int> &paddings,
             const float constant_value);

  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future);

  std::unique_ptr<OpenCLPadKernel> kernel_;
};
#endif  // MACE_ENABLE_OPENCL

}  // namespace kernels
}  // namespace mace

#endif  // MACE_KERNELS_PAD_H_
array_multiply_omp.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> /* <--- OpenMP --- */ int main(int argc, char **argv) { struct timespec ts_start, ts_end; int size = 1e8; int multiplier = 2; int *a, *c; int i; float time_total; /* Allocate memory for arrays */ a = malloc(size * sizeof(int)); c = malloc(size * sizeof(int)); /* Get start time */ clock_gettime(CLOCK_MONOTONIC, &ts_start); /* Multiply array a by multiplier */ #pragma omp parallel for /* <--- OpenMP --- */ for (i = 0; i < size; i++) { c[i] = multiplier * a[i]; } /* Get end time */ clock_gettime(CLOCK_MONOTONIC, &ts_end); time_total = (ts_end.tv_sec - ts_start.tv_sec) * 1e9 + (ts_end.tv_nsec - ts_start.tv_nsec); printf("Total time is %f ms\n", time_total / 1e6); }
rt_dgeqrf.c
#include "runtime.h" void RT_CORE_dgeqrt(Quark *quark, Quark_Task_Flags *task_flags, int m, int n, int ib, int nb, double *A, int lda, double *T, int ldt) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { QUARK_CORE_dgeqrt( quark, task_flags, m, n, ib, nb, A, lda, T, ldt); } else if (plasma->runtime == PLASMA_OMPSS) { /* */ double *TAU = malloc(nb*sizeof(double)); double *WORK = malloc(ib*nb*sizeof(double)); //#pragma omp register ([ib*nb]WORK) //#pragma omp register ([nb]TAU) //printf("\n\n DGEQRF BEFORE m %d n %d ib %d lda %d ldt %d\n", m, n, ib, lda, ldt); /* */ #pragma omp target device (smp) copy_deps #pragma omp task inout([lda*nb]A) out([ldt*nb]T) label(dgeqrt) CORE_dgeqrt(m, n, ib, A, lda, T, ldt, TAU, WORK); //printf("\n\n DGEQRF AFTER\n"); } }
Matrix.h
#pragma once

#include <algorithm>
#include <exception>
#include <functional>
#include <iostream>
#include <omp.h>
#include <stdexcept>
#include <type_traits>
#include <vector>

namespace cppmath
{

/**
 * Dense rows x cols matrix stored as a vector of row vectors.
 *
 * Only floating point element types are permitted (enforced at compile
 * time).  Element-wise operators throw std::invalid_argument on dimension
 * mismatch; scalar division by zero throws std::overflow_error.
 * Matrix-matrix multiplication picks a serial or OpenMP-parallel kernel
 * based on the operand size.
 */
template <typename T>
class Matrix
{
    static_assert(std::is_floating_point<T>::value,
                  "A specialization of the matrix class has to be of a "
                  "floating point type!");

public:
    using MatrixDataType = std::vector<std::vector<T>>;

    Matrix() = delete;
    /// rows x cols matrix, zero initialized.
    Matrix(std::size_t rows, std::size_t cols);
    /// rows x cols matrix, every element set to value.
    Matrix(std::size_t rows, std::size_t cols, const T &value);
    ~Matrix() noexcept = default;

    Matrix(const Matrix &other) = default;
    Matrix &operator=(const Matrix &other) = default;
    Matrix(Matrix &&other) noexcept = default;
    Matrix &operator=(Matrix &&other) noexcept = default;

    Matrix operator+(const Matrix &rhs);
    Matrix &operator+=(const Matrix &rhs);
    Matrix operator-(const Matrix &rhs);
    Matrix &operator-=(const Matrix &rhs);
    Matrix operator*(const T &scalar);
    Matrix &operator*=(const T &scalar);
    Matrix operator/(const T &scalar);
    Matrix &operator/=(const T &scalar);
    Matrix operator*(const Matrix &rhs);
    Matrix &operator*=(const Matrix &rhs);

    /// result = matrixA * matrixB, serial triple loop (result pre-zeroed).
    void dot(const Matrix &matrixA, const Matrix &matrixB, Matrix &result);
    /// result = matrixA * matrixB, OpenMP-parallel over output rows.
    void parallel_dot(const Matrix &matrixA, const Matrix &matrixB,
                      Matrix &result);

    void print_matrix() const;
    std::size_t num_rows() const;
    std::size_t num_cols() const;

private:
    std::size_t m_rows;   // number of rows
    std::size_t m_cols;   // number of columns
    MatrixDataType m_data;  // row-major element storage
};

template <typename T>
Matrix<T>::Matrix(std::size_t rows, std::size_t cols)
    : m_rows(rows), m_cols(cols), m_data(m_rows, std::vector<T>(m_cols, 0))
{
}

template <typename T>
Matrix<T>::Matrix(std::size_t rows, std::size_t cols, const T &value)
    : m_rows(rows), m_cols(cols),
      m_data(m_rows, std::vector<T>(m_cols, value))
{
}

template <typename T>
Matrix<T> Matrix<T>::operator+(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    Matrix<T> result(m_rows, m_cols);
    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(),
                       rhs.m_data[i].begin(), result.m_data[i].begin(),
                       std::plus<T>());
    }
    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator+=(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(),
                       rhs.m_data[i].begin(), m_data[i].begin(),
                       std::plus<T>());
    }
    return *this;
}

template <typename T>
Matrix<T> Matrix<T>::operator-(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    Matrix<T> result(m_rows, m_cols);
    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(),
                       rhs.m_data[i].begin(), result.m_data[i].begin(),
                       std::minus<T>());
    }
    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator-=(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(),
                       rhs.m_data[i].begin(), m_data[i].begin(),
                       std::minus<T>());
    }
    return *this;
}

template <typename T>
Matrix<T> Matrix<T>::operator*(const T &scalar)
{
    Matrix<T> result(m_rows, m_cols);
    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(),
                       result.m_data[i].begin(),
                       [scalar](const T val) -> T { return val * scalar; });
    }
    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator*=(const T &scalar)
{
    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(), m_data[i].begin(),
                       [scalar](const T val) -> T { return val * scalar; });
    }
    return *this;
}

template <typename T>
Matrix<T> Matrix<T>::operator/(const T &scalar)
{
    if (scalar == 0)
    {
        throw(std::overflow_error(
            "You cannot divide by a scalar value of zero!"));
    }

    Matrix<T> result(m_rows, m_cols);
    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(),
                       result.m_data[i].begin(),
                       [scalar](const T val) -> T { return val / scalar; });
    }
    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator/=(const T &scalar)
{
    // Bug fix: operator/ rejected a zero scalar but operator/= silently
    // divided by zero; make both operators consistent.
    if (scalar == 0)
    {
        throw(std::overflow_error(
            "You cannot divide by a scalar value of zero!"));
    }

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(m_data[i].begin(), m_data[i].end(), m_data[i].begin(),
                       [scalar](const T val) -> T { return val / scalar; });
    }
    return *this;
}

template <typename T>
Matrix<T> Matrix<T>::operator*(const Matrix<T> &rhs)
{
    if (m_cols != rhs.m_rows)
    {
        // Bug fix: the old message ("Number of cols are not equal!")
        // described the wrong condition.
        throw(std::invalid_argument(
            "Number of cols of the left matrix and rows of the right "
            "matrix are not equal!"));
    }

    Matrix<T> result(m_rows, rhs.m_cols);
    // Small products are faster serially; threshold on the lhs dimensions.
    if (m_rows < 250 && m_cols < 250)
    {
        dot(*this, rhs, result);
    }
    else
    {
        parallel_dot(*this, rhs, result);
    }
    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator*=(const Matrix<T> &rhs)
{
    if (m_cols != rhs.m_rows)
    {
        throw(std::invalid_argument(
            "Number of cols of the left matrix and rows of the right "
            "matrix are not equal!"));
    }

    *this = (*this) * rhs;
    return *this;
}

template <typename T>
void Matrix<T>::dot(const Matrix<T> &matrixA, const Matrix<T> &matrixB,
                    Matrix<T> &result)
{
    for (std::size_t i = 0; i != matrixA.m_rows; ++i)
    {
        for (std::size_t j = 0; j != matrixB.m_cols; ++j)
        {
            for (std::size_t k = 0; k != matrixB.m_rows; ++k)
            {
                result.m_data[i][j] =
                    result.m_data[i][j] +
                    matrixA.m_data[i][k] * matrixB.m_data[k][j];
            }
        }
    }
}

template <typename T>
void Matrix<T>::parallel_dot(const Matrix<T> &matrixA,
                             const Matrix<T> &matrixB, Matrix<T> &result)
{
    // Bug fix: the loops previously used `i != bound` tests.  The OpenMP
    // canonical loop form requires a relational test (<, <=, >, >=) on the
    // worksharing loop; `!=` was only legalized in OpenMP 5.0 and is
    // rejected or miscompiled by earlier implementations.  Each thread
    // writes disjoint rows of `result`, so no further synchronization is
    // needed.
#pragma omp parallel for shared(result) num_threads(4)
    for (std::size_t i = 0; i < matrixA.m_rows; ++i)
    {
        for (std::size_t j = 0; j < matrixB.m_cols; ++j)
        {
            for (std::size_t k = 0; k < matrixB.m_rows; ++k)
            {
                result.m_data[i][j] =
                    result.m_data[i][j] +
                    matrixA.m_data[i][k] * matrixB.m_data[k][j];
            }
        }
    }
}

template <typename T>
void Matrix<T>::print_matrix() const
{
    for (std::size_t i = 0; i < m_rows; ++i)
    {
        for (std::size_t j = 0; j < m_cols; ++j)
        {
            std::cout << m_data[i][j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}

template <typename T>
std::size_t Matrix<T>::num_rows() const
{
    return m_rows;
}

template <typename T>
std::size_t Matrix<T>::num_cols() const
{
    return m_cols;
}

} // namespace cppmath
matmult.c
/****************************************************************************** * OpenMp Example - Matrix Multiply - C Version * Demonstrates a matrix multiply using OpenMP. * * Modified from here: * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_mm.c * * For PAPI_FP_INS, the exclusive count for the event: * for (null) [OpenMP location: file:matmult.c ] * should be 2E+06 / Number of Threads ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "matmult_initialize.h" #ifdef TAU_MPI int provided; #include <mpi.h> /* NOTE: MPI is just used to spawn multiple copies of the kernel to different ranks. This is not a parallel implementation */ #endif /* TAU_MPI */ #ifdef PTHREADS #include <pthread.h> #include <unistd.h> #include <errno.h> /*** NOTE THE ATTR INITIALIZER HERE! ***/ pthread_mutex_t mutexsum; #endif /* PTHREADS */ #define APP_USE_INLINE_MULTIPLY 1 #ifndef MATRIX_SIZE #define MATRIX_SIZE 512 #endif #define NRA MATRIX_SIZE /* number of rows in matrix A */ #define NCA MATRIX_SIZE /* number of columns in matrix A */ #define NCB MATRIX_SIZE /* number of columns in matrix B */ double** allocateMatrix(int rows, int cols) { int i; double **matrix = (double**)malloc((sizeof(double*)) * rows); for (i=0; i<rows; i++) { matrix[i] = (double*)malloc((sizeof(double)) * cols); } return matrix; } void freeMatrix(double** matrix, int rows, int cols) { int i; for (i=0; i<rows; i++) { free(matrix[i]); } free(matrix); } double multiply(double a, double b) { return a * b; } #ifdef TAU_OPENMP // cols_a and rows_b are the same value void compute_nested(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; double tmp = 0.0; /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp parallel for private(i,j,k) shared(a,b,c) for (i=0; i<rows_a; i++) { { for (k=0; k<cols_a; k++) { for(j=0; 
j<cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else tmp = a[i][k]; tmp = tmp * b[k][j]; c[i][j] += tmp; #endif } } } } } #endif // cols_a and rows_b are the same value void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i=0; i<rows_a; i++) { for(j=0; j<cols_b; j++) { for (k=0; k<cols_a; k++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i=0; i<rows_a; i++) { for (k=0; k<cols_a; k++) { for(j=0; j<cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } double do_work(void) { double **a, /* matrix A to be multiplied */ **b, /* matrix B to be multiplied */ **c; /* result matrix C */ a = allocateMatrix(NRA, NCA); b = allocateMatrix(NCA, NCB); c = allocateMatrix(NRA, NCB); /*** Spawn a parallel region explicitly scoping all variables ***/ initialize(a, NRA, NCA); initialize(b, NCA, NCB); initialize(c, NRA, NCB); compute(a, b, c, NRA, NCA, NCB); #if defined(TAU_OPENMP) //if (omp_get_nested()) { compute_nested(a, b, c, NRA, NCA, NCB); //} #endif #ifdef TAU_MPI if (provided == MPI_THREAD_MULTIPLE) { printf("provided is 
MPI_THREAD_MULTIPLE\n"); } else if (provided == MPI_THREAD_FUNNELED) { printf("provided is MPI_THREAD_FUNNELED\n"); } #endif /* TAU_MPI */ compute_interchange(a, b, c, NRA, NCA, NCB); double result = c[0][1]; freeMatrix(a, NRA, NCA); freeMatrix(b, NCA, NCB); freeMatrix(c, NCA, NCB); return result; } #ifdef PTHREADS int busy_sleep() { int i, sum = 0; for (i = 0 ; i < 100000000 ; i++) { sum = sum+i; } return sum; } void * threaded_func(void *data) { int rc; int sum = 0; // compute do_work(); #ifdef APP_DO_LOCK_TEST // test locking - sampling should catch this if ((rc = pthread_mutex_lock(&mutexsum)) != 0) { errno = rc; perror("thread lock error"); exit(1); } fprintf(stderr,"Thread 'sleeping'...\n"); fflush(stderr); sum += busy_sleep(); fprintf(stderr,"Thread 'awake'...\n"); fflush(stderr); if ((rc = pthread_mutex_unlock(&mutexsum)) != 0) { errno = rc; perror("thread unlock error"); exit(1); } pthread_exit((void*) 0); //return NULL; #endif // APP_DO_LOCK_TEST } #endif // PTHREADS int main (int argc, char *argv[]) { #ifdef PTHREADS int ret; pthread_attr_t attr; pthread_t tid1, tid2, tid3; pthread_mutexattr_t Attr; pthread_mutexattr_init(&Attr); pthread_mutexattr_settype(&Attr, PTHREAD_MUTEX_ERRORCHECK); if (pthread_mutex_init(&mutexsum, &Attr)) { printf("Error while using pthread_mutex_init\n"); } #endif /* PTHREADS */ #ifdef TAU_MPI int rc = MPI_SUCCESS; #if defined(PTHREADS) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_MULTIPLE=%d\n", provided, MPI_THREAD_MULTIPLE); #elif defined(TAU_OPENMP) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_FUNNELED=%d\n", provided, MPI_THREAD_FUNNELED); #else rc = MPI_Init(&argc, &argv); #endif /* THREADS */ if (rc != MPI_SUCCESS) { char *errorstring; int length = 0; MPI_Error_string(rc, errorstring, &length); printf("Error: MPI_Init failed, rc = %d\n%s\n", rc, errorstring); exit(1); } #endif 
/* TAU_MPI */ #ifdef PTHREADS if (ret = pthread_create(&tid1, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid2, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid3, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (3) fails ret = %d\n", ret); exit(1); } #endif /* PTHREADS */ /* On thread 0: */ int i; //for (i = 0 ; i < 100 ; i++) { do_work(); //} #ifdef PTHREADS if (ret = pthread_join(tid1, NULL) ) { printf("Error: pthread_join (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid2, NULL) ) { printf("Error: pthread_join (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid3, NULL) ) { printf("Error: pthread_join (3) fails ret = %d\n", ret); exit(1); } pthread_mutex_destroy(&mutexsum); #endif /* PTHREADS */ #ifdef TAU_MPI MPI_Finalize(); #endif /* TAU_MPI */ printf ("Done.\n"); return 0; }
d2d_memcpy.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-aarch64-unknown-linux-gnu | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-powerpc64-ibm-linux-gnu | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-powerpc64le-ibm-linux-gnu | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-x86_64-pc-linux-gnu | %fcheck-x86_64-pc-linux-gnu -allow-empty
// RUN: %libomptarget-compile-nvptx64-nvidia-cuda && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-nvptx64-nvidia-cuda | %fcheck-nvptx64-nvidia-cuda -allow-empty

// Regression test: device-to-device omp_target_memcpy.  Fills a buffer on
// the first device, copies it to the last device, then reads it back and
// checks every element on the host.

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

const int magic_num = 7;

int main(int argc, char *argv[]) {
  const int N = 128;
  const int num_devices = omp_get_num_devices();

  // No target device, just return
  if (num_devices == 0) {
    printf("PASS\n");
    return 0;
  }

  // Source is device 0; destination is the last device (may be the same
  // device when only one is present).
  const int src_device = 0;
  int dst_device = num_devices - 1;

  int length = N * sizeof(int);
  int *src_ptr = omp_target_alloc(length, src_device);
  int *dst_ptr = omp_target_alloc(length, dst_device);
  assert(src_ptr && "src_ptr is NULL");
  assert(dst_ptr && "dst_ptr is NULL");

  // Initialize the source buffer on the source device.
#pragma omp target teams distribute parallel for device(src_device) \
    is_device_ptr(src_ptr)
  for (int i = 0; i < N; ++i) {
    src_ptr[i] = magic_num;
  }

  // Device-to-device copy under test (offsets 0, 0).
  int rc =
      omp_target_memcpy(dst_ptr, src_ptr, length, 0, 0, dst_device, src_device);
  assert(rc == 0 && "error in omp_target_memcpy");

  int *buffer = malloc(length);
  assert(buffer && "failed to allocate host buffer");

  // Read the copied data back on the destination device, adding magic_num so
  // the host can distinguish a real copy from stale zeros.
#pragma omp target teams distribute parallel for device(dst_device) \
    map(from: buffer[0:N]) is_device_ptr(dst_ptr)
  for (int i = 0; i < N; ++i) {
    buffer[i] = dst_ptr[i] + magic_num;
  }

  // Every element must have made the round trip: magic_num + magic_num.
  for (int i = 0; i < N; ++i)
    assert(buffer[i] == 2 * magic_num);

  printf("PASS\n");

  // Free host and device memory
  free(buffer);
  omp_target_free(src_ptr, src_device);
  omp_target_free(dst_ptr, dst_device);

  return 0;
}

// CHECK: PASS
gemm.c
// This file is generated from test alphabets program by code generator in alphaz // To compile this code, use -lm option for math library. // Includes #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <limits.h> #include <float.h> #include <omp.h> #include <immintrin.h> #include <malloc.h> // Common Macros #define max(x, y) ((x)>(y) ? (x) : (y)) #define MAX(x, y) ((x)>(y) ? (x) : (y)) #define min(x, y) ((x)>(y) ? (y) : (x)) #define MIN(x, y) ((x)>(y) ? (y) : (x)) #define CEILD(n,d) (int)ceil(((float)(n))/((float)(d))) #define ceild(n,d) (int)ceil(((float)(n))/((float)(d))) #define FLOORD(n,d) (int)floor(((float)(n))/((float)(d))) #define floord(n,d) (int)floor(((float)(n))/((float)(d))) #define CDIV(x,y) CEILD((x),(y)) #define div(x,y) CDIV((x),(y)) #define FDIV(x,y) FLOORD((x),(y)) #define LB_SHIFT(b,s) ((int)ceild(b,s) * s) #define MOD(i,j) ((i)%(j)) #define mallocCheck(v,s,d) if ((v) == NULL) { printf("Failed to allocate memory for %s : size=%lu\n", "sizeof(d)*(s)", sizeof(d)*(s)); exit(-1); } // Reduction Operators #define RADD(x,y) ((x)+=(y)) #define RMUL(x,y) ((x)*=(y)) #define RMAX(x,y) ((x)=MAX((x),(y))) #define RMIN(x,y) ((x)=MIN((x),(y))) // Common functions for min and max //functions for integer max inline int __max_int(int x, int y){ return ((x)>(y) ? (x) : (y)); } inline short __max_short(short x, short y){ return ((x)>(y) ? (x) : (y)); } inline long __max_long(long x, long y){ return ((x)>(y) ? (x) : (y)); } inline unsigned int __max_unsigned_int(unsigned int x, unsigned int y){ return ((x)>(y) ? (x) : (y)); } inline unsigned short __max_unsigned_short(unsigned short x, unsigned short y){ return ((x)>(y) ? (x) : (y)); } //function for float max inline float __max_float(float x, float y){ return ((x)>(y) ? (x) : (y)); } //function for integer min inline int __min_int(int x, int y){ return ((x)>(y) ? (y) : (x)); } inline short __min_short(short x, short y){ return ((x)>(y) ? 
(y) : (x)); } inline long __min_long(long x, long y){ return ((x)>(y) ? (y) : (x)); } inline unsigned int __min_unsigned_int(unsigned int x, unsigned int y){ return ((x)>(y) ? (y) : (x)); } inline unsigned short __min_unsigned_short(unsigned short x, unsigned short y){ return ((x)>(y) ? (y) : (x)); } inline unsigned long __min_unsigned_long(unsigned long x, unsigned long y){ return ((x)>(y) ? (y) : (x)); } inline float __min_float(float x, float y){ return ((x)>(y) ? (y) : (x)); } //Memory Macros #define A(i,j) A[(i) * (Q) + j] #define B(i,j) B[(i) * (R) + j] #define Cout(i,j) Cout[(i) * (R) + j] #define Acc(i,j) Acc[(i) * (R) + j] void gemm(long P, long Q, long R, long ts1_l1, long ts2_l1, long ts3_l1, float* alpha, float* beta, float* A, float* B, float* Cout){ ///Parameter checking if (!((P >= 2 && Q >= 2 && R >= 2 && ts1_l1 > 0 && ts2_l1 > 0 && ts3_l1 > 0))) { printf("The value of parameters are not valid.\n"); exit(-1); } //Memory Allocation float* Acc = (float*)malloc(sizeof(float)*((P) * (R))); mallocCheck(Acc, ((P) * (R)), float); #define S1(i,j,k) Acc(i,k) = (Acc(i,k))+((A(i,j))*(B(j,k))) #define S2(i,j,k) Acc(i,k) = (A(i,j))*(B(j,k)) #define S0(i,j,i2) Cout(i,i2) = ((*alpha)*(Acc(i,i2)))+((*beta)*(Cout(i,i2))) { //Domain //{i,j,k|P>=2 && R>=2 && i>=0 && P>=i+1 && k>=0 && R>=k+1 && Q>=j+1 && j>=1 && Q>=2} //{i,j,k|j==0 && P>=2 && Q>=2 && R>=2 && i>=0 && P>=i+1 && k>=0 && R>=k+1} //{i,j,i2|j==Q-1 && i>=0 && P>=i+1 && Q>=2 && R>=i2+1 && P>=2 && i2>=0 && R>=2} int ti1_l1,ti2_l1,ti3_l1,start_l1_d0,end_l1_d0,time_l1_d0,c1,c2,c3; if ((Q >= 3)) { { { start_l1_d0 = INT_MAX; end_l1_d0 = INT_MIN; ti1_l1 = (ceild((-ts1_l1+1),(ts1_l1))) * (ts1_l1); ti2_l1 = (ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1); ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = 
min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti2_l1 = Q-1; ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti1_l1 = P-1; ti2_l1 = (ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1); ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti2_l1 = Q-1; ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); for(time_l1_d0=start_l1_d0;time_l1_d0 <= end_l1_d0;time_l1_d0+=1) { #pragma omp parallel for private(c1,c2,c3,ti1_l1,ti2_l1,ti3_l1) schedule(static ,1) for(ti1_l1=(ceild((-ts1_l1+1),(ts1_l1))) * (ts1_l1);ti1_l1 <= P-1;ti1_l1+=ts1_l1) { for(ti2_l1=(ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1);ti2_l1 <= Q-1;ti2_l1+=ts2_l1) { ti3_l1 = (time_l1_d0 + ((ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1)) * (-1)) * (ts3_l1); if (((ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1) <= ti3_l1 && ti3_l1 <= R-1)) { //guard that 
isolates selected statements for generic point loops if ((0 < ti2_l1 && ti2_l1+ts2_l1 < Q)) { //full-tile guard if ((0 <= ti1_l1 && ti1_l1+ts1_l1 <= P && 0 <= ti2_l1-1 && ti2_l1+ts2_l1 <= Q-1 && 0 <= ti3_l1 && ti3_l1+ts3_l1 <= R)) { for(c1=ti1_l1;c1 <= ti1_l1+ts1_l1-1;c1+=1) { for(c2=ti2_l1;c2 <= ti2_l1+ts2_l1-1;c2+=1) { //#pragma ivdep //#pragma vector always for(c3=ti3_l1;c3 <= ti3_l1+ts3_l1-1;c3+=1) { S1((c1),(c2),(c3)); } } } } else { for(c1=max(ti1_l1,0);c1 <= min(ti1_l1+ts1_l1-1,P-1);c1+=1) { for(c2=max(ti2_l1,1);c2 <= min(ti2_l1+ts2_l1-1,Q-2);c2+=1) { //#pragma ivdep //#pragma vector always for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(c2),(c3)); } } } } } else { { for(c1=max(ti1_l1,0);c1 <= min(ti1_l1+ts1_l1-1,P-1);c1+=1) { for(c2=max(ti2_l1,0);c2 <= min(ti2_l1+ts2_l1-1,0);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S2((c1),(0),(c3)); } } for(c2=max(ti2_l1,1);c2 <= min(ti2_l1+ts2_l1-1,Q-2);c2+=1) { //#pragma ivdep //#pragma vector always for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(c2),(c3)); } } for(c2=max(ti2_l1,Q-1);c2 <= min(ti2_l1+ts2_l1-1,Q-1);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(Q-1),(c3)); S0((c1),(Q-1),(c3)); } } } } } } } } } } } } if (Q == 2) { { { start_l1_d0 = INT_MAX; end_l1_d0 = INT_MIN; ti1_l1 = (ceild((-ts1_l1+1),(ts1_l1))) * (ts1_l1); ti2_l1 = (ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1); ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti2_l1 = 1; ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + 
(ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti1_l1 = P-1; ti2_l1 = (ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1); ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti2_l1 = 1; ti3_l1 = (ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1); start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); ti3_l1 = R-1; start_l1_d0 = min(start_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); end_l1_d0 = max(end_l1_d0,(ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1) + (ti3_l1)/(ts3_l1)); for(time_l1_d0=start_l1_d0;time_l1_d0 <= end_l1_d0;time_l1_d0+=1) { #pragma omp parallel for private(c1,c2,c3,ti1_l1,ti2_l1,ti3_l1) schedule(static ,1) for(ti1_l1=(ceild((-ts1_l1+1),(ts1_l1))) * (ts1_l1);ti1_l1 <= P-1;ti1_l1+=ts1_l1) { for(ti2_l1=(ceild((-ts2_l1+1),(ts2_l1))) * (ts2_l1);ti2_l1 <= 1;ti2_l1+=ts2_l1) { ti3_l1 = (time_l1_d0 + ((ti1_l1)/(ts1_l1) + (ti2_l1)/(ts2_l1)) * (-1)) * (ts3_l1); if (((ceild((-ts3_l1+1),(ts3_l1))) * (ts3_l1) <= ti3_l1 && ti3_l1 <= R-1)) { //guard that isolates selected statements for generic point loops if (0 < ti2_l1) { //full-tile guard if (0 <= ti1_l1 && ti1_l1+ts1_l1 <= P && 0 <= ti2_l1-1 && ti2_l1+ts2_l1 <= 2 && 0 <= ti3_l1 && ti3_l1+ts3_l1 <= R) { for(c1=ti1_l1;c1 <= ti1_l1+ts1_l1-1;c1+=1) { for(c2=ti2_l1;c2 <= ti2_l1+ts2_l1-1;c2+=1) { 
for(c3=ti3_l1;c3 <= ti3_l1+ts3_l1-1;c3+=1) { S1((c1),(1),(c3)); S0((c1),(1),(c3)); } } } } else { for(c1=max(ti1_l1,0);c1 <= min(ti1_l1+ts1_l1-1,P-1);c1+=1) { for(c2=max(ti2_l1,1);c2 <= min(ti2_l1+ts2_l1-1,1);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(1),(c3)); S0((c1),(1),(c3)); } } } } } else { { for(c1=max(ti1_l1,0);c1 <= min(ti1_l1+ts1_l1-1,P-1);c1+=1) { for(c2=max(ti2_l1,0);c2 <= min(ti2_l1+ts2_l1-1,0);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S2((c1),(0),(c3)); } } for(c2=max(ti2_l1,1);c2 <= min(ti2_l1+ts2_l1-1,1);c2+=1) { for(c3=max(ti3_l1,0);c3 <= min(ti3_l1+ts3_l1-1,R-1);c3+=1) { S1((c1),(1),(c3)); S0((c1),(1),(c3)); } } } } } } } } } } } } } #undef S1 #undef S2 #undef S0 //Memory Free free(Acc); } //Memory Macros #undef A #undef B #undef Cout #undef Acc //Common Macro undefs #undef max #undef MAX #undef min #undef MIN #undef CEILD #undef ceild #undef FLOORD #undef floord #undef CDIV #undef FDIV #undef LB_SHIFT #undef MOD #undef RADD #undef RMUL #undef RMAX #undef RMIN
ex1.c
#include <stdio.h> #include <omp.h> # define PAD 8 #define NUM_THREADS 4 static long num_steps = 100000; double step; int main() { double pi, sum[NUM_THREADS][PAD]; int i, ts_num; step = 1.0/(double)num_steps; // set number of used threads omp_set_num_threads(NUM_THREADS); #pragma omp parallel { int i, ts_nums, id; double x; // get curretn thread ID id = omp_get_thread_num(); // get threads num ts_nums = omp_get_num_threads(); // only first thread is specyfing the threads numbers if (id == 0) ts_num = ts_nums; for(i=id, sum[id][0]=0.0;i < num_steps;i=i+ts_nums) { x = (i+0.5)*step; sum[id][0] += 4.0/(1.0+x*x); } } // end of OMP PARALLEL for(i=0,pi=0.0; i<ts_num; i++) pi += sum[i][0]*step; printf("pi is %f\n", pi); }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
variational_distance_calculation_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // Ruben Zorrilla // // #if !defined(KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED ) #define KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "containers/model.h" #include "includes/kratos_flags.h" #include "elements/distance_calculation_element_simplex.h" #include "linear_solvers/linear_solver.h" #include "processes/process.h" #include "modeler/connectivity_preserve_modeler.h" #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "solving_strategies/strategies/residualbased_linear_strategy.h" #include "utilities/variable_utils.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /**takes a model part full of SIMPLICIAL ELEMENTS (triangles and tetras) and recomputes a signed distance function mantaining as much as possible the position of the zero of the function prior to the call. 
This is achieved by minimizing the function ( 1 - norm( gradient( distance ) )**2
with the restriction that "distance" is a finite element function.
*/
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
class VariationalDistanceCalculationProcess : public Process
{
public:

    // Local configuration flags of the process.
    KRATOS_DEFINE_LOCAL_FLAG(PERFORM_STEP1);
    KRATOS_DEFINE_LOCAL_FLAG(DO_EXPENSIVE_CHECKS);
    KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE);

    ///@name Type Definitions
    ///@{

    typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
    typedef typename SchemeType::Pointer SchemePointerType;
    typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverPointerType;
    typedef SolvingStrategy< TSparseSpace, TDenseSpace, TLinearSolver > SolvingStrategyType;

    ///@}
    ///@name Pointer Definitions

    /// Pointer definition of VariationalDistanceCalculationProcess
    KRATOS_CLASS_POINTER_DEFINITION(VariationalDistanceCalculationProcess);

    ///@}
    ///@name Life Cycle
    ///@{

    /**This process recomputes the distance function maintaining the zero of the existing distance distribution
     * for this reason the DISTANCE should be initialized to values distinct from zero in at least some portions of the domain
     * alternatively, the DISTANCE shall be fixed to zero at least on some nodes, and the process will compute a positive distance
     * respecting that zero
     * @param base_model_part - is the model part on the top of which the calculation will be performed
     * @param plinear_solver - linear solver to be used internally
     * @param max_iterations - maximum number of iterations to be employed in the nonlinear optimization process.
     *                       - can also be set to 0 if a (very) rough approximation is enough
     *
     * EXAMPLE OF USAGE FROM PYTHON:
     *
     class distance_linear_solver_settings:
         solver_type = "AMGCL"
         tolerance = 1E-3
         max_iteration = 200
         scaling = False
         krylov_type = "CG"
         smoother_type = "SPAI0"
         verbosity = 0

     import linear_solver_factory
     distance_linear_solver = linear_solver_factory.ConstructSolver(distance_linear_solver_settings)

     max_iterations=1
     distance_calculator = VariationalDistanceCalculationProcess2D(fluid_model_part, distance_linear_solver, max_iterations)
     distance_calculator.Execute()
     */
    VariationalDistanceCalculationProcess(
        ModelPart& base_model_part,
        typename TLinearSolver::Pointer plinear_solver,
        unsigned int max_iterations = 10,
        Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
        std::string AuxPartName = "RedistanceCalculationPart" )
    :
        mdistance_part_is_initialized(false),
        mmax_iterations(max_iterations),
        mr_base_model_part( base_model_part ),
        mOptions( Options ),
        mAuxModelPartName( AuxPartName )
    {
        KRATOS_TRY

        ValidateInput();

        // Generate an auxiliary model part and populate it by elements of type DistanceCalculationElementSimplex
        ReGenerateDistanceModelPart(base_model_part);

        auto p_builder_solver = Kratos::make_shared<ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> >(plinear_solver);
        InitializeSolutionStrategy(plinear_solver, p_builder_solver);

        KRATOS_CATCH("")
    }

    /// Constructor with custom Builder And Solver
    /** To be used in the trilinos version, since the trilinos builder and
     *  solver needs additional data (the EpetraComm).
     *  @param rBaseModelPart Reference ModelPart for distance calculation.
     *  @param pLinearSolver Linear solver for the distance system.
     *  @param MaxIterations Maximum number of non-linear optimization iterations.
     *  @param Options Configuration flags for the procedure.
     *  @param AuxPartName Name to be used for the internal distance calculation ModelPart.
     */
    VariationalDistanceCalculationProcess(
        ModelPart& rBaseModelPart,
        typename TLinearSolver::Pointer pLinearSolver,
        BuilderSolverPointerType pBuilderAndSolver,
        unsigned int MaxIterations = 10,
        Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
        std::string AuxPartName = "RedistanceCalculationPart" )
    :
        mdistance_part_is_initialized(false),
        mmax_iterations(MaxIterations),
        mr_base_model_part( rBaseModelPart ),
        mOptions( Options ),
        mAuxModelPartName( AuxPartName )
    {
        KRATOS_TRY

        ValidateInput();

        // Generate an auxiliary model part and populate it by elements of type DistanceCalculationElementSimplex
        ReGenerateDistanceModelPart(rBaseModelPart);

        InitializeSolutionStrategy(pLinearSolver, pBuilderAndSolver);

        KRATOS_CATCH("")
    }

    /// Destructor. Removes the internal auxiliary ModelPart from the Model.
    ~VariationalDistanceCalculationProcess() override
    {
        Model& current_model = mr_base_model_part.GetModel();
        if(current_model.HasModelPart( mAuxModelPartName ))
            current_model.DeleteModelPart( mAuxModelPartName );
    };

    ///@}
    ///@name Operators
    ///@{

    void operator()()
    {
        Execute();
    }

    ///@}
    ///@name Operations
    ///@{

    // Runs the two-step variational redistancing on the auxiliary model part.
    void Execute() override
    {
        KRATOS_TRY;

        if(mdistance_part_is_initialized == false){
            ReGenerateDistanceModelPart(mr_base_model_part);
        }

        Model& current_model = mr_base_model_part.GetModel();
        ModelPart& r_distance_model_part = current_model.GetModelPart( mAuxModelPartName );

        // TODO: check flag PERFORM_STEP1
        // Step1 - solve a poisson problem with a source term which depends on the sign of the existing distance function
        r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,1);

        // Unfix the distances
        const int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());

        #pragma omp parallel for
        for(int i_node = 0; i_node < nnodes; ++i_node){
            auto it_node = r_distance_model_part.NodesBegin() + i_node;
            double& d = it_node->FastGetSolutionStepValue(DISTANCE);
            double& fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);

            // Free the DISTANCE values (FLAG_VARIABLE encodes fixity: 1.0 free, -1.0 fixed)
            fix_flag = 1.0;
            it_node->Free(DISTANCE);

            // Save the distances in the non-historical database (used later to recover the sign)
            it_node->SetValue(DISTANCE, d);

            if(d == 0){
                d = 1.0e-15;
                fix_flag = -1.0;
                it_node->Fix(DISTANCE);
            } else {
                if(d > 0.0){
                    d = 1.0e15; // Set to a large number, to make sure that the minimal distance is computed according to CalculateTetrahedraDistances
                } else {
                    d = -1.0e15;
                }
            }
        }

        const int nelem = static_cast<int>(r_distance_model_part.NumberOfElements());

        // Recompute exact (unsigned) distances on the elements cut by the interface
        #pragma omp parallel for
        for(int i_elem = 0; i_elem < nelem; ++i_elem){
            auto it_elem = r_distance_model_part.ElementsBegin() + i_elem;
            array_1d<double,TDim+1> distances;
            auto& geom = it_elem->GetGeometry();

            for(unsigned int i=0; i<TDim+1; i++){
                distances[i] = geom[i].GetValue(DISTANCE);
            }

            const array_1d<double,TDim+1> original_distances = distances;

            // The element is cut by the interface
            if(this->IsSplit(distances)){
                // Compute the unsigned distance using GeometryUtils
                if (mOptions.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE)) {
                    GeometryUtils::CalculateExactDistancesToPlane(geom, distances);
                } else {
                    if(TDim==3){
                        GeometryUtils::CalculateTetrahedraDistances(geom, distances);
                    } else {
                        GeometryUtils::CalculateTriangleDistances(geom, distances);
                    }
                }

                // Assign the sign using the original distance values
                for(unsigned int i = 0; i < TDim+1; ++i){
                    if(original_distances[i] < 0){
                        distances[i] = -distances[i];
                    }
                }

                // Keep per node the smallest-magnitude candidate; node locks guard
                // concurrent updates from elements sharing the node.
                for(unsigned int i = 0; i < TDim+1; ++i){
                    double &d = geom[i].FastGetSolutionStepValue(DISTANCE);
                    double &fix_flag = geom[i].FastGetSolutionStepValue(FLAG_VARIABLE);
                    geom[i].SetLock();
                    if(std::abs(d) > std::abs(distances[i])){
                        d = distances[i];
                    }
                    fix_flag = -1.0;
                    geom[i].Fix(DISTANCE);
                    geom[i].UnSetLock();
                }
            }
        }

        // SHALL WE SYNCHRONIZE SOMETHING IN HERE?¿?¿??¿ WE'VE CHANGED THE NODAL DISTANCE VALUES FROM THE ELEMENTS...
        this->SynchronizeFixity();
        this->SynchronizeDistance();

        // Compute the maximum and minimum distance for the fixed nodes
        double max_dist = 0.0;
        double min_dist = 0.0;
        for(int i_node = 0; i_node < nnodes; ++i_node){
            auto it_node = r_distance_model_part.NodesBegin() + i_node;
            if(it_node->IsFixed(DISTANCE)){
                const double& d = it_node->FastGetSolutionStepValue(DISTANCE);
                if(d > max_dist){
                    max_dist = d;
                }
                if(d < min_dist){
                    min_dist = d;
                }
            }
        }

        // Synchronize the maximum and minimum distance values (MPI reduction)
        const auto &r_communicator = r_distance_model_part.GetCommunicator().GetDataCommunicator();
        max_dist = r_communicator.MaxAll(max_dist);
        min_dist = r_communicator.MinAll(min_dist);

        // Assign the max dist to all of the non-fixed positive nodes
        // and the minimum one to the non-fixed negatives
        #pragma omp parallel for
        for(int i_node = 0; i_node < nnodes; ++i_node){
            auto it_node = r_distance_model_part.NodesBegin() + i_node;
            if(!it_node->IsFixed(DISTANCE)){
                double& d = it_node->FastGetSolutionStepValue(DISTANCE);
                if(d>0){
                    d = max_dist;
                } else {
                    d = min_dist;
                }
            }
        }

        mp_solving_strategy->Solve();

        // Step2 - minimize the target residual
        r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,2);
        for(unsigned int it = 0; it<mmax_iterations; it++){
            mp_solving_strategy->Solve();
        }

        // Unfix the distances
        #pragma omp parallel for
        for(int i_node = 0; i_node < nnodes; ++i_node){
            auto it_node = (r_distance_model_part.NodesBegin()) + i_node;
            it_node->Free(DISTANCE);
        }

        KRATOS_CATCH("")
    }

    // Empties the auxiliary model part and resets the strategy; the process
    // will re-generate the distance model part on the next Execute().
    virtual void Clear()
    {
        Model& current_model = mr_base_model_part.GetModel();
        ModelPart& r_distance_model_part = current_model.GetModelPart( mAuxModelPartName );
        r_distance_model_part.Nodes().clear();
        r_distance_model_part.Conditions().clear();
        r_distance_model_part.Elements().clear();
        // r_distance_model_part.GetProcessInfo().clear();
        mdistance_part_is_initialized = false;

        mp_solving_strategy->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "VariationalDistanceCalculationProcess";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << "VariationalDistanceCalculationProcess";
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    bool mdistance_part_is_initialized;        // true once the auxiliary model part exists
    unsigned int mmax_iterations;              // number of step-2 minimization iterations
    ModelPart& mr_base_model_part;             // model part the redistancing operates on
    Flags mOptions;
    std::string mAuxModelPartName;             // name of the internal distance model part

    typename SolvingStrategyType::UniquePointer mp_solving_strategy;

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    // Checks geometry type, non-empty mesh and required nodal variables.
    void ValidateInput()
    {
        const DataCommunicator& r_comm = mr_base_model_part.GetCommunicator().GetDataCommunicator();
        int num_elements = mr_base_model_part.NumberOfElements();
        int num_nodes = mr_base_model_part.NumberOfNodes();

        if (num_elements > 0)
        {
            const auto geometry_family = mr_base_model_part.ElementsBegin()->GetGeometry().GetGeometryFamily();
            KRATOS_ERROR_IF( (TDim == 2) && (geometry_family != GeometryData::Kratos_Triangle) )
                << "In 2D the element type is expected to be a triangle." << std::endl;
            KRATOS_ERROR_IF( (TDim == 3) && (geometry_family != GeometryData::Kratos_Tetrahedra) )
                << "In 3D the element type is expected to be a tetrahedron" << std::endl;
        }

        KRATOS_ERROR_IF(r_comm.SumAll(num_nodes) == 0) << "The model part has no nodes." << std::endl;
        KRATOS_ERROR_IF(r_comm.SumAll(num_elements) == 0) << "The model Part has no elements." << std::endl;

        // Check that required nodal variables are present
        VariableUtils().CheckVariableExists<Variable<double > >(DISTANCE, mr_base_model_part.Nodes());
        VariableUtils().CheckVariableExists<Variable<double > >(FLAG_VARIABLE, mr_base_model_part.Nodes());
    }

    // Builds the linear strategy used by both redistancing steps.
    void InitializeSolutionStrategy(
        typename TLinearSolver::Pointer pLinearSolver,
        BuilderSolverPointerType pBuilderAndSolver)
    {
        // Generate a linear strategy
        auto p_scheme = Kratos::make_shared< ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace,TDenseSpace > >();

        Model& r_model = mr_base_model_part.GetModel();
        ModelPart& r_distance_model_part = r_model.GetModelPart( mAuxModelPartName );

        bool CalculateReactions = false;
        bool ReformDofAtEachIteration = false;
        bool CalculateNormDxFlag = false;

        mp_solving_strategy = Kratos::make_unique<ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver> >(
            r_distance_model_part,
            p_scheme,
            pLinearSolver,
            pBuilderAndSolver,
            CalculateReactions,
            ReformDofAtEachIteration,
            CalculateNormDxFlag);

        // TODO: check flag DO_EXPENSIVE_CHECKS
        mp_solving_strategy->Check();
    }

    // (Re)creates the auxiliary model part mirroring the base mesh with
    // DistanceCalculationElementSimplex elements, and marks boundary nodes.
    virtual void ReGenerateDistanceModelPart(ModelPart& r_base_model_part)
    {
        KRATOS_TRY

        Model& current_model = r_base_model_part.GetModel();

        if(current_model.HasModelPart( mAuxModelPartName ))
            current_model.DeleteModelPart( mAuxModelPartName );

        // Ensure that the nodes have distance as a DOF
        VariableUtils().AddDof<Variable<double> >(DISTANCE, r_base_model_part);

        // Generate
        ModelPart& r_distance_model_part = current_model.CreateModelPart( mAuxModelPartName );

        Element::Pointer p_distance_element = Kratos::make_intrusive<DistanceCalculationElementSimplex<TDim> >();

        ConnectivityPreserveModeler modeler;
        modeler.GenerateModelPart(r_base_model_part, r_distance_model_part, *p_distance_element);

        // Using the conditions to mark the boundary with the flag boundary
        // Note that we DO NOT add the conditions to the model part
        VariableUtils().SetFlag<ModelPart::NodesContainerType>(BOUNDARY, false, r_distance_model_part.Nodes());

        // Note that above we have assigned the same geometry. Thus the flag is
        // set in the distance model part despite we are iterating the base one
        for (auto it_cond = r_base_model_part.ConditionsBegin(); it_cond != r_base_model_part.ConditionsEnd(); ++it_cond){
            Geometry< Node<3> >& geom = it_cond->GetGeometry();
            for(unsigned int i=0; i<geom.size(); i++){
                geom[i].Set(BOUNDARY,true);
            }
        }

        r_base_model_part.GetCommunicator().SynchronizeOrNodalFlags(BOUNDARY);

        mdistance_part_is_initialized = true;

        KRATOS_CATCH("")
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    // An element is split when its nodal distances carry both signs
    // (zero counts as positive here).
    bool IsSplit(const array_1d<double,TDim+1> &rDistances){
        unsigned int positives = 0, negatives = 0;

        for(unsigned int i = 0; i < TDim+1; ++i){
            if(rDistances[i] >= 0){
                ++positives;
            } else {
                ++negatives;
            }
        }

        if (positives > 0 && negatives > 0){
            return true;
        }

        return false;
    }

    // MPI-only: take the minimum |DISTANCE| across partitions, then restore
    // the sign stored in the non-historical DISTANCE.
    void SynchronizeDistance(){
        Model& current_model = mr_base_model_part.GetModel();
        ModelPart& r_distance_model_part = current_model.GetModelPart( mAuxModelPartName );
        auto &r_communicator = r_distance_model_part.GetCommunicator();

        // Only required in the MPI case
        if(r_communicator.TotalProcesses() != 1){
            int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());

            // Set the distance absolute value
            #pragma omp parallel for
            for(int i_node = 0; i_node < nnodes; ++i_node){
                auto it_node = r_distance_model_part.NodesBegin() + i_node;
                it_node->FastGetSolutionStepValue(DISTANCE) = std::abs(it_node->FastGetSolutionStepValue(DISTANCE));
            }

            // Synchronize the unsigned value to minimum
            r_communicator.SynchronizeCurrentDataToMin(DISTANCE);

            // Set the distance sign again by retrieving it from the non-historical database
            #pragma omp parallel for
            for(int i_node = 0; i_node < nnodes; ++i_node){
                auto it_node = r_distance_model_part.NodesBegin() + i_node;
                if(it_node->GetValue(DISTANCE) < 0.0){
                    it_node->FastGetSolutionStepValue(DISTANCE) = -it_node->FastGetSolutionStepValue(DISTANCE);
                }
            }
        }
    }

    // MPI-only: propagate fixity across partitions via FLAG_VARIABLE.
    void SynchronizeFixity(){
        Model& current_model = mr_base_model_part.GetModel();
        ModelPart& r_distance_model_part = current_model.GetModelPart( mAuxModelPartName );
        auto &r_communicator = r_distance_model_part.GetCommunicator();

        // Only required in the MPI case
        if(r_communicator.TotalProcesses() != 1){
            int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());

            // Synchronize the fixity flag variable to minimum
            // (-1.0 means fixed and 1.0 means free)
            r_communicator.SynchronizeCurrentDataToMin(FLAG_VARIABLE);

            // Set the fixity according to the synchronized flag
            #pragma omp parallel for
            for(int i_node = 0; i_node < nnodes; ++i_node){
                auto it_node = r_distance_model_part.NodesBegin() + i_node;
                const double &r_fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);
                if (r_fix_flag == -1.0){
                    it_node->Fix(DISTANCE);
                }
            }
        }
    }

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    VariationalDistanceCalculationProcess& operator=(VariationalDistanceCalculationProcess const& rOther);

    /// Copy constructor.
    //VariationalDistanceCalculationProcess(VariationalDistanceCalculationProcess const& rOther);

    ///@}
}; // Class VariationalDistanceCalculationProcess

// Avoiding the KRATOS_CREATE_LOCAL_FLAG macro since this class has a template parameter.
If there was no template plase use the KRATOS_CREATE_LOCAL_FLAG macro template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver > const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::PERFORM_STEP1(Kratos::Flags::Create(0)); template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver > const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::DO_EXPENSIVE_CHECKS(Kratos::Flags::Create(1)); template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver > const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(2)); ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver> inline std::istream& operator >> (std::istream& rIStream, VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis); /// output stream function template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver> inline std::ostream& operator << (std::ostream& rOStream, const VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED defined
/* colorspace.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. 
*/ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RGBTransformImage() converts the reference image from RGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalized the range of the transformed values to % be [0..QuantumRange]. % % The format of the RGBTransformImage method is: % % MagickBooleanType RGBTransformImage(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. % */ static inline void ConvertRGBToXYZ(const Quantum red,const Quantum green, const Quantum blue,double *X,double *Y,double *Z) { double b, g, r; assert(X != (double *) NULL); assert(Y != (double *) NULL); assert(Z != (double *) NULL); r=QuantumScale*red; if (r > 0.04045) r=pow((r+0.055)/1.055,2.4); else r/=12.92; g=QuantumScale*green; if (g > 0.04045) g=pow((g+0.055)/1.055,2.4); else g/=12.92; b=QuantumScale*blue; if (b > 0.04045) b=pow((b+0.055)/1.055,2.4); else b/=12.92; *X=0.4124240*r+0.3575790*g+0.1804640*b; *Y=0.2126560*r+0.7151580*g+0.0721856*b; *Z=0.0193324*r+0.1191930*g+0.9504440*b; } static double LabF1(double alpha) { if (alpha <= ((24.0/116.0)*(24.0/116.0)*(24.0/116.0))) return((841.0/108.0)*alpha+(16.0/116.0)); return(pow(alpha,1.0/3.0)); } static inline void ConvertXYZToLab(const double X,const double Y,const double Z, double *L,double *a,double *b) { #define D50X (0.9642) #define D50Y (1.0) #define D50Z (0.8249) double fx, fy, fz; assert(L != (double *) NULL); assert(a != (double *) NULL); assert(b != (double *) NULL); *L=0.0; *a=0.5; *b=0.5; if ((X == 0.0) && (Y == 0.0) && (Z == 0.0)) return; fx=LabF1(X/D50X); fy=LabF1(Y/D50Y); 
fz=LabF1(Z/D50Z); *L=(116.0*fy-16.0)/100.0; *a=(500.0*(fx-fy))/255.0; if (*a < 0.0) *a+=1.0; *b=(200.0*(fy-fz))/255.0; if (*b < 0.0) *b+=1.0; } MagickExport MagickBooleanType RGBTransformImage(Image *image, const ColorspaceType colorspace) { #define RGBTransformImageTag "RGBTransform/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status, sync; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != RGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); switch (image->colorspace) { case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: case RGBColorspace: case TransparentColorspace: break; default: { (void) TransformImageColorspace(image,image->colorspace); break; } } if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYColorspace: { /* Convert RGB to CMY colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetRedPixelComponent(q,ClampToQuantum((MagickRealType) (QuantumRange-GetRedPixelComponent(q)))); SetGreenPixelComponent(q,ClampToQuantum((MagickRealType) (QuantumRange-GetGreenPixelComponent(q)))); SetBluePixelComponent(q,ClampToQuantum((MagickRealType) (QuantumRange-GetBluePixelComponent(q)))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; return(status); } case CMYKColorspace: { MagickPixelPacket zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertRGBToCMYK(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; return(status); } case HSBColorspace: { /* Transform image from RGB to HSB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double brightness, hue, saturation; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } hue=0.0; saturation=0.0; brightness=0.0; for (x=0; x < (ssize_t) image->columns; x++) { ConvertRGBToHSB(GetRedPixelComponent(q),GetGreenPixelComponent(q), GetBluePixelComponent(q),&hue,&saturation,&brightness); SetRedPixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*hue)); SetGreenPixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*saturation)); SetBluePixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*brightness)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case HSLColorspace: { /* Transform image from RGB to HSL. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double hue, lightness, saturation; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } hue=0.0; saturation=0.0; lightness=0.0; for (x=0; x < (ssize_t) image->columns; x++) { ConvertRGBToHSL(GetRedPixelComponent(q),GetGreenPixelComponent(q), GetBluePixelComponent(q),&hue,&saturation,&lightness); SetRedPixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*hue)); SetGreenPixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*saturation)); SetBluePixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*lightness)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case HWBColorspace: { /* Transform image from RGB to HWB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blackness, hue, whiteness; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } hue=0.0; whiteness=0.0; blackness=0.0; for (x=0; x < (ssize_t) image->columns; x++) { ConvertRGBToHWB(GetRedPixelComponent(q),GetGreenPixelComponent(q), GetBluePixelComponent(q),&hue,&whiteness,&blackness); SetRedPixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*hue)); SetGreenPixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*whiteness)); SetBluePixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*blackness)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case LabColorspace: { /* Transform image from RGB to Lab. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double a, b, L, X, Y, Z; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } L=0.0; a=0.0; b=0.0; X=0.0; Y=0.0; Z=0.0; for (x=0; x < (ssize_t) image->columns; x++) { ConvertRGBToXYZ(GetRedPixelComponent(q),GetGreenPixelComponent(q), GetBluePixelComponent(q),&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,&L,&a,&b); SetRedPixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*L)); SetGreenPixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*a)); SetBluePixelComponent(q,ClampToQuantum((MagickRealType) QuantumRange*b)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. */ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=1.0/InterpretLocaleValue(value,(char **) NULL) != 0.0 ? 
InterpretLocaleValue(value,(char **) NULL) : 1.0; film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=InterpretLocaleValue(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=InterpretLocaleValue(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=InterpretLocaleValue(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)* 0.002/film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+ log10(black+((MagickRealType) i/MaxMap)*(1.0-black))/((gamma/density)* 0.002/film_gamma))/1024.0)); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { SetRedPixelComponent(q,logmap[ScaleQuantumToMap( GetRedPixelComponent(q))]); SetGreenPixelComponent(q,logmap[ScaleQuantumToMap( GetGreenPixelComponent(q))]); SetBluePixelComponent(q,logmap[ScaleQuantumToMap( GetBluePixelComponent(q))]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); 
logmap=(Quantum *) RelinquishMagickMemory(logmap); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.33333f*(MagickRealType) i; y_map[i].x=0.33334f*(MagickRealType) i; z_map[i].x=0.33333f*(MagickRealType) i; x_map[i].y=0.50000f*(MagickRealType) i; y_map[i].y=0.00000f*(MagickRealType) i; z_map[i].y=(-0.50000f)*(MagickRealType) i; x_map[i].z=(-0.25000f)*(MagickRealType) i; y_map[i].z=0.50000f*(MagickRealType) i; z_map[i].z=(-0.25000f)*(MagickRealType) i; } break; } case Rec601LumaColorspace: case GRAYColorspace: { /* Initialize Rec601 luma tables: G = 0.29900*R+0.58700*G+0.11400*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.29900f*(MagickRealType) i; y_map[i].x=0.58700f*(MagickRealType) i; z_map[i].x=0.11400f*(MagickRealType) i; x_map[i].y=0.29900f*(MagickRealType) i; y_map[i].y=0.58700f*(MagickRealType) i; z_map[i].y=0.11400f*(MagickRealType) i; 
x_map[i].z=0.29900f*(MagickRealType) i; y_map[i].z=0.58700f*(MagickRealType) i; z_map[i].z=0.11400f*(MagickRealType) i; } image->type=GrayscaleType; break; } case Rec601YCbCrColorspace: case YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.299000*R+0.587000*G+0.114000*B Cb= -0.168736*R-0.331264*G+0.500000*B Cr= 0.500000*R-0.418688*G-0.081312*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.299000f*(MagickRealType) i; y_map[i].x=0.587000f*(MagickRealType) i; z_map[i].x=0.114000f*(MagickRealType) i; x_map[i].y=(-0.168730f)*(MagickRealType) i; y_map[i].y=(-0.331264f)*(MagickRealType) i; z_map[i].y=0.500000f*(MagickRealType) i; x_map[i].z=0.500000f*(MagickRealType) i; y_map[i].z=(-0.418688f)*(MagickRealType) i; z_map[i].z=(-0.081312f)*(MagickRealType) i; } break; } case Rec709LumaColorspace: { /* Initialize Rec709 luma tables: G = 0.21260*R+0.71520*G+0.07220*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.21260f*(MagickRealType) i; y_map[i].x=0.71520f*(MagickRealType) i; z_map[i].x=0.07220f*(MagickRealType) i; x_map[i].y=0.21260f*(MagickRealType) i; y_map[i].y=0.71520f*(MagickRealType) i; z_map[i].y=0.07220f*(MagickRealType) i; x_map[i].z=0.21260f*(MagickRealType) i; y_map[i].z=0.71520f*(MagickRealType) i; z_map[i].z=0.07220f*(MagickRealType) i; } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212600*R+0.715200*G+0.072200*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.212600f*(MagickRealType) i; y_map[i].x=0.715200f*(MagickRealType) i; z_map[i].x=0.072200f*(MagickRealType) i; x_map[i].y=(-0.114572f)*(MagickRealType) i; y_map[i].y=(-0.385428f)*(MagickRealType) i; z_map[i].y=0.500000f*(MagickRealType) i; x_map[i].z=0.500000f*(MagickRealType) i; y_map[i].z=(-0.454153f)*(MagickRealType) i; z_map[i].z=(-0.045847f)*(MagickRealType) i; } break; } case sRGBColorspace: { /* Linear sRGB to nonlinear RGB (http://www.w3.org/Graphics/Color/sRGB): R = 1.0*R+0.0*G+0.0*B G = 0.0*R+0.1*G+0.0*B B = 0.0*R+0.0*G+1.0*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { MagickRealType v; v=(MagickRealType) i/(MagickRealType) MaxMap; if (((MagickRealType) i/(MagickRealType) MaxMap) <= 0.04045f) v/=12.92f; else v=(MagickRealType) pow((((double) i/MaxMap)+0.055)/1.055,2.4); x_map[i].x=1.0f*MaxMap*v; y_map[i].x=0.0f*MaxMap*v; z_map[i].x=0.0f*MaxMap*v; x_map[i].y=0.0f*MaxMap*v; y_map[i].y=1.0f*MaxMap*v; z_map[i].y=0.0f*MaxMap*v; x_map[i].z=0.0f*MaxMap*v; y_map[i].z=0.0f*MaxMap*v; z_map[i].z=1.0f*MaxMap*v; } break; } case XYZColorspace: { /* Initialize CIE XYZ tables (ITU-R 709 RGB): X = 0.4124564*R+0.3575761*G+0.1804375*B Y = 0.2126729*R+0.7151522*G+0.0721750*B Z = 0.0193339*R+0.1191920*G+0.9503041*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.4124564f*(MagickRealType) i; y_map[i].x=0.3575761f*(MagickRealType) i; z_map[i].x=0.1804375f*(MagickRealType) i; x_map[i].y=0.2126729f*(MagickRealType) i; y_map[i].y=0.7151522f*(MagickRealType) i; z_map[i].y=0.0721750f*(MagickRealType) i; x_map[i].z=0.0193339f*(MagickRealType) i; 
y_map[i].z=0.1191920f*(MagickRealType) i; z_map[i].z=0.9503041f*(MagickRealType) i; } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.29900*R+0.58700*G+0.11400*B C1= -0.29900*R-0.58700*G+0.88600*B C2= 0.70100*R-0.58700*G-0.11400*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.003962014134275617f*(MagickRealType) i; y_map[i].x=0.007778268551236748f*(MagickRealType) i; z_map[i].x=0.001510600706713781f*(MagickRealType) i; x_map[i].y=(-0.002426619775463276f)*(MagickRealType) i; y_map[i].y=(-0.004763965913702149f)*(MagickRealType) i; z_map[i].y=0.007190585689165425f*(MagickRealType) i; x_map[i].z=0.006927257754597858f*(MagickRealType) i; y_map[i].z=(-0.005800713697502058f)*(MagickRealType) i; z_map[i].z=(-0.0011265440570958f)*(MagickRealType) i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.2201118963486454*(1.099f*(MagickRealType) i-0.099f); y_map[i].x=0.4321260306242638*(1.099f*(MagickRealType) i-0.099f); z_map[i].x=0.08392226148409894*(1.099f*(MagickRealType) i-0.099f); x_map[i].y=(-0.1348122097479598)*(1.099f*(MagickRealType) i-0.099f); y_map[i].y=(-0.2646647729834528)*(1.099f*(MagickRealType) i-0.099f); z_map[i].y=0.3994769827314126*(1.099f*(MagickRealType) i-0.099f); x_map[i].z=0.3848476530332144*(1.099f*(MagickRealType) i-0.099f); y_map[i].z=(-0.3222618720834477)*(1.099f*(MagickRealType) i-0.099f); z_map[i].z=(-0.06258578094976668)*(1.099f*(MagickRealType) i-0.099f); } break; } case YIQColorspace: { /* Initialize YIQ tables: Y = 0.29900*R+0.58700*G+0.11400*B I = 0.59600*R-0.27400*G-0.32200*B Q = 0.21100*R-0.52300*G+0.31200*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.29900f*(MagickRealType) i; y_map[i].x=0.58700f*(MagickRealType) i; z_map[i].x=0.11400f*(MagickRealType) i; x_map[i].y=0.59600f*(MagickRealType) i; y_map[i].y=(-0.27400f)*(MagickRealType) i; z_map[i].y=(-0.32200f)*(MagickRealType) i; x_map[i].z=0.21100f*(MagickRealType) i; y_map[i].z=(-0.52300f)*(MagickRealType) i; z_map[i].z=0.31200f*(MagickRealType) i; } break; } case YPbPrColorspace: { /* Initialize YPbPr tables (ITU-R BT.601): Y = 0.299000*R+0.587000*G+0.114000*B Pb= -0.168736*R-0.331264*G+0.500000*B Pr= 0.500000*R-0.418688*G-0.081312*B Pb and Pr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.299000f*(MagickRealType) i; y_map[i].x=0.587000f*(MagickRealType) i; z_map[i].x=0.114000f*(MagickRealType) i; x_map[i].y=(-0.168736f)*(MagickRealType) i; y_map[i].y=(-0.331264f)*(MagickRealType) i; z_map[i].y=0.500000f*(MagickRealType) i; x_map[i].z=0.500000f*(MagickRealType) i; y_map[i].z=(-0.418688f)*(MagickRealType) i; z_map[i].z=(-0.081312f)*(MagickRealType) i; } break; } case YUVColorspace: default: { /* Initialize YUV tables: Y = 0.29900*R+0.58700*G+0.11400*B U = -0.14740*R-0.28950*G+0.43690*B V = 0.61500*R-0.51500*G-0.10000*B U and V, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. Note that U = 0.493*(B-Y), V = 0.877*(R-Y). 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.29900f*(MagickRealType) i; y_map[i].x=0.58700f*(MagickRealType) i; z_map[i].x=0.11400f*(MagickRealType) i; x_map[i].y=(-0.14740f)*(MagickRealType) i; y_map[i].y=(-0.28950f)*(MagickRealType) i; z_map[i].y=0.43690f*(MagickRealType) i; x_map[i].z=0.61500f*(MagickRealType) i; y_map[i].z=(-0.51500f)*(MagickRealType) i; z_map[i].z=(-0.10000f)*(MagickRealType) i; } break; } } /* Convert from RGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; register size_t blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(GetRedPixelComponent(q)); green=ScaleQuantumToMap(GetGreenPixelComponent(q)); blue=ScaleQuantumToMap(GetBluePixelComponent(q)); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ (MagickRealType) primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ (MagickRealType) primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ (MagickRealType) primary_info.z; SetRedPixelComponent(q,ScaleMapToQuantum(pixel.red)); SetGreenPixelComponent(q,ScaleMapToQuantum(pixel.green)); SetBluePixelComponent(q,ScaleMapToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) 
{ MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RGBTransformImage) #endif proceed=SetImageProgress(image,RGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register size_t blue, green, red; /* Convert PseudoClass image. */ image_view=AcquireCacheView(image); for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=ScaleMapToQuantum(pixel.red); image->colormap[i].green=ScaleMapToQuantum(pixel.green); image->colormap[i].blue=ScaleMapToQuantum(pixel.blue); } image_view=DestroyCacheView(image_view); (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /*
    Set the colorspace member of the Image structure.  This version performs
    no pixel transformation; it only records the colorspace.

    Added the same argument validation every other exported entry point in
    this file performs (see RGBTransformImage and TransformImageColorspace):
    dereferencing a NULL or stale image handle here was previously
    unchecked.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  image->colorspace=colorspace;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (colorspace == UndefinedColorspace)
    {
      /* Undefined: just stamp the member, no pixel conversion. */
      if (SetImageColorspace(image,colorspace) == MagickFalse)
        return(MagickFalse);
      return(MagickTrue);
    }
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already there; nothing to do */
  if ((colorspace == RGBColorspace) || (colorspace == TransparentColorspace))
    return(TransformRGBImage(image,image->colorspace));
  status=MagickTrue;
  /*
    Route through RGB first unless the current colorspace is already an
    RGB-compatible one, then convert RGB to the requested colorspace.
  */
  if ((image->colorspace != RGBColorspace) &&
      (image->colorspace != TransparentColorspace) &&
      (image->colorspace != GRAYColorspace))
    status=TransformRGBImage(image,image->colorspace);
  if (RGBTransformImage(image,colorspace) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   + T r a n s f o r m R G B I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to RGB. 
The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/*
  LabF2() is the inverse of the CIE Lab f() companding function: it maps a
  normalized Lab component back to a linear XYZ ratio.  Inputs above the knee
  (24/116) take the cubic branch; smaller inputs take the linear branch,
  which is clamped so the result is never negative.
*/
static double LabF2(double alpha)
{
  double
    beta;

  if (alpha > (24.0/116.0))
    return(alpha*alpha*alpha);
  beta=(108.0/841.0)*(alpha-(16.0/116.0));
  if (beta > 0.0)
    return(beta);
  return(0.0);
}

/*
  ConvertLabToXYZ() converts a quantum-scaled Lab triplet (each component
  normalized to [0,1]) to XYZ.  The a and b channels wrap at 0.5: values
  above 0.5 are treated as negative (value-1.0), matching how this file
  packs signed a/b into an unsigned quantum.  A non-positive L yields black
  (X=Y=Z=0).  Scales by the D50X/D50Y/D50Z white-point macros -- assumes
  those are already #defined earlier in this translation unit; TODO confirm.
*/
static inline void ConvertLabToXYZ(const double L,const double a,const double b,
  double *X,double *Y,double *Z)
{
  double
    x,
    y,
    z;

  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  *X=0.0;
  *Y=0.0;
  *Z=0.0;
  if (L <= 0.0)
    return;
  y=(100.0*L+16.0)/116.0;
  x=y+255.0*0.002*(a > 0.5 ? a-1.0 : a);  /* a > 0.5 encodes a negative value */
  z=y-255.0*0.005*(b > 0.5 ? b-1.0 : b);  /* b > 0.5 encodes a negative value */
  *X=D50X*LabF2(x);
  *Y=D50Y*LabF2(y);
  *Z=D50Z*LabF2(z);
}

/*
  RoundToYCC() rounds to the nearest integer and clamps to [0,1388], the
  valid index range of the YCCMap[1389] lookup table defined below.
*/
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

static inline void ConvertXYZToRGB(const double x,const double y,const double z,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    b,
    g,
    r;

  /*
    Convert XYZ to RGB colorspace.
*/ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); r=3.2404542*x-1.5371385*y-0.4985314*z; g=(-0.9692660*x+1.8760108*y+0.0415560*z); b=0.0556434*x-0.2040259*y+1.0572252*z; if (r > 0.0031308) r=1.055*pow(r,1.0/2.4)-0.055; else r*=12.92; if (g > 0.0031308) g=1.055*pow(g,1.0/2.4)-0.055; else g*=12.92; if (b > 0.0031308) b=1.055*pow(b,1.0/2.4)-0.055; else b*=12.92; *red=RoundToQuantum((MagickRealType) QuantumRange*r); *green=RoundToQuantum((MagickRealType) QuantumRange*g); *blue=RoundToQuantum((MagickRealType) QuantumRange*b); } static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel) { pixel->red=(MagickRealType) QuantumRange-(QuantumScale*pixel->red* (QuantumRange-pixel->index)+pixel->index); pixel->green=(MagickRealType) QuantumRange-(QuantumScale*pixel->green* (QuantumRange-pixel->index)+pixel->index); pixel->blue=(MagickRealType) QuantumRange-(QuantumScale*pixel->blue* (QuantumRange-pixel->index)+pixel->index); } MagickExport MagickBooleanType TransformRGBImage(Image *image, const ColorspaceType colorspace) { #define D50X (0.9642) #define D50Y (1.0) #define D50Z (0.8249) #define TransformRGBImageTag "Transform/Image" #if !defined(MAGICKCORE_HDRI_SUPPORT) static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 
0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 
0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 
0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 
0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 
0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 
0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; #endif CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); switch (colorspace) { case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: case RGBColorspace: case TransparentColorspace: case UndefinedColorspace: return(MagickTrue); default: break; } status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYColorspace: { /* Transform image from CMY to RGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetRedPixelComponent(q,ClampToQuantum((MagickRealType) (QuantumRange-GetRedPixelComponent(q)))); SetGreenPixelComponent(q,ClampToQuantum((MagickRealType) (QuantumRange-GetGreenPixelComponent(q)))); SetBluePixelComponent(q,ClampToQuantum((MagickRealType) (QuantumRange-GetBluePixelComponent(q)))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,RGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to RGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,RGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case HSBColorspace: { /* Transform image from HSB to RGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double brightness, hue, saturation; MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; hue=(double) (QuantumScale*GetRedPixelComponent(q)); saturation=(double) (QuantumScale*GetGreenPixelComponent(q)); brightness=(double) (QuantumScale*GetBluePixelComponent(q)); ConvertHSBToRGB(hue,saturation,brightness,&red,&green,&blue); SetRedPixelComponent(q,red); SetGreenPixelComponent(q,green); SetBluePixelComponent(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,RGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case HSLColorspace: { /* Transform image from HSL to RGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double hue, lightness, saturation; MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; hue=(double) (QuantumScale*GetRedPixelComponent(q)); saturation=(double) (QuantumScale*GetGreenPixelComponent(q)); lightness=(double) (QuantumScale*GetBluePixelComponent(q)); ConvertHSLToRGB(hue,saturation,lightness,&red,&green,&blue); SetRedPixelComponent(q,red); SetGreenPixelComponent(q,green); SetBluePixelComponent(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,RGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case HWBColorspace: { /* Transform image from HWB to RGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blackness, hue, whiteness; MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; hue=(double) (QuantumScale*GetRedPixelComponent(q)); whiteness=(double) (QuantumScale*GetGreenPixelComponent(q)); blackness=(double) (QuantumScale*GetBluePixelComponent(q)); ConvertHWBToRGB(hue,whiteness,blackness,&red,&green,&blue); SetRedPixelComponent(q,red); SetGreenPixelComponent(q,green); SetBluePixelComponent(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,RGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LabColorspace: { /* Transform image from Lab to RGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { double a, b, L, X, Y, Z; MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } X=0.0; Y=0.0; Z=0.0; for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; L=QuantumScale*GetRedPixelComponent(q); a=QuantumScale*GetGreenPixelComponent(q); b=QuantumScale*GetBluePixelComponent(q); ConvertLabToXYZ(L,a,b,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); SetRedPixelComponent(q,red); SetGreenPixelComponent(q,green); SetBluePixelComponent(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,RGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to RGB colorspace. */ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=1.0/InterpretLocaleValue(value,(char **) NULL) != 0.0 ? 
InterpretLocaleValue(value,(char **) NULL) : 1.0; film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=InterpretLocaleValue(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=InterpretLocaleValue(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=InterpretLocaleValue(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)* 0.002/film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)* (gamma/density)*0.002/film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=(Quantum) QuantumRange; if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { SetRedPixelComponent(q,logmap[ScaleQuantumToMap( GetRedPixelComponent(q))]); SetGreenPixelComponent(q,logmap[ScaleQuantumToMap( GetGreenPixelComponent(q))]); SetBluePixelComponent(q,logmap[ScaleQuantumToMap( 
GetBluePixelComponent(q))]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,RGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) i; y_map[i].x=0.500000f*(2.000000*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].x=(-0.333340f)*(2.000000f*(MagickRealType) i-(MagickRealType) MaxMap); x_map[i].y=(MagickRealType) i; y_map[i].y=0.000000f; z_map[i].y=0.666665f*(2.000000f*(MagickRealType) i-(MagickRealType) MaxMap); x_map[i].z=(MagickRealType) i; y_map[i].z=(-0.500000f)*(2.000000f*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].z=(-0.333340f)*(2.000000f*(MagickRealType) i-(MagickRealType) MaxMap); } break; } case Rec601YCbCrColorspace: case YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) i; y_map[i].x=0.000000f; z_map[i].x=(1.402000f*0.500000f)*(2.000000f*(MagickRealType) i- (MagickRealType) MaxMap); x_map[i].y=(MagickRealType) i; y_map[i].y=(-0.344136f*0.500000f)*(2.000000f*(MagickRealType) i- (MagickRealType) MaxMap); z_map[i].y=(-0.714136f*0.500000f)*(2.000000f*(MagickRealType) i- (MagickRealType) MaxMap); x_map[i].z=(MagickRealType) i; y_map[i].z=(1.772000f*0.500000f)*(2.000000f*(MagickRealType) i- (MagickRealType) MaxMap); z_map[i].z=0.000000f; } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) i; y_map[i].x=0.000000f; z_map[i].x=(1.574800f*0.50000f)*(2.00000f*(MagickRealType) i- (MagickRealType) MaxMap); x_map[i].y=(MagickRealType) i; y_map[i].y=(-0.187324f*0.50000f)*(2.00000f*(MagickRealType) i- (MagickRealType) MaxMap); z_map[i].y=(-0.468124f*0.50000f)*(2.00000f*(MagickRealType) i- (MagickRealType) MaxMap); x_map[i].z=(MagickRealType) i; y_map[i].z=(1.855600f*0.50000f)*(2.00000f*(MagickRealType) i- (MagickRealType) MaxMap); z_map[i].z=0.00000f; } break; } case sRGBColorspace: { /* Nonlinear sRGB to linear RGB. R = 1.0*R+0.0*G+0.0*B G = 0.0*R+1.0*G+0.0*B B = 0.0*R+0.0*G+1.0*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=1.0f*(MagickRealType) i; y_map[i].x=0.0f*(MagickRealType) i; z_map[i].x=0.0f*(MagickRealType) i; x_map[i].y=0.0f*(MagickRealType) i; y_map[i].y=1.0f*(MagickRealType) i; z_map[i].y=0.0f*(MagickRealType) i; x_map[i].z=0.0f*(MagickRealType) i; y_map[i].z=0.0f*(MagickRealType) i; z_map[i].z=1.0f*(MagickRealType) i; } break; } case XYZColorspace: { /* Initialize CIE XYZ tables (ITU R-709 RGB): R = 3.2404542*X-1.5371385*Y-0.4985314*Z G = -0.9692660*X+1.8760108*Y+0.0415560*Z B = 0.0556434*X-0.2040259*Y+1.057225*Z */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=3.2404542f*(MagickRealType) i; x_map[i].y=(-0.9692660f)*(MagickRealType) i; x_map[i].z=0.0556434f*(MagickRealType) i; y_map[i].x=(-1.5371385f)*(MagickRealType) i; y_map[i].y=1.8760108f*(MagickRealType) i; y_map[i].z=(-0.2040259f)*(MagickRealType) i; z_map[i].x=(-0.4985314f)*(MagickRealType) i; z_map[i].y=0.0415560f*(MagickRealType) i; z_map[i].z=1.0572252f*(MagickRealType) i; } break; } case YCCColorspace: { /* Initialize YCC tables: R 
= Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=1.3584000f*(MagickRealType) i; y_map[i].x=0.0000000f; z_map[i].x=1.8215000f*((MagickRealType) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137))); x_map[i].y=1.3584000f*(MagickRealType) i; y_map[i].y=(-0.4302726f)*((MagickRealType) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156))); z_map[i].y=(-0.9271435f)*((MagickRealType) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137))); x_map[i].z=1.3584000f*(MagickRealType) i; y_map[i].z=2.2179000f*((MagickRealType) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156))); z_map[i].z=0.0000000f; } break; } case YIQColorspace: { /* Initialize YIQ tables: R = Y+0.95620*I+0.62140*Q G = Y-0.27270*I-0.64680*Q B = Y-1.10370*I+1.70060*Q I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) i; y_map[i].x=0.47810f*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].x=0.31070f*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); x_map[i].y=(MagickRealType) i; y_map[i].y=(-0.13635f)*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].y=(-0.32340f)*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); x_map[i].z=(MagickRealType) i; y_map[i].z=(-0.55185f)*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].z=0.85030f*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); } break; } case YPbPrColorspace: { /* Initialize YPbPr tables: R = Y +1.402000*C2 G = Y-0.344136*C1+0.714136*C2 B = Y+1.772000*C1 Pb and Pr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) i; y_map[i].x=0.000000f; z_map[i].x=0.701000f*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); x_map[i].y=(MagickRealType) i; y_map[i].y=(-0.172068f)*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].y=0.357068f*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); x_map[i].z=(MagickRealType) i; y_map[i].z=0.88600f*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].z=0.00000f; } break; } case YUVColorspace: default: { /* Initialize YUV tables: R = Y +1.13980*V G = Y-0.39380*U-0.58050*V B = Y+2.02790*U U and V, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) i; y_map[i].x=0.00000f; z_map[i].x=0.56990f*(2.0000f*(MagickRealType) i-(MagickRealType) MaxMap); x_map[i].y=(MagickRealType) i; y_map[i].y=(-0.19690f)*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].y=(-0.29025f)*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); x_map[i].z=(MagickRealType) i; y_map[i].z=1.01395f*(2.00000f*(MagickRealType) i-(MagickRealType) MaxMap); z_map[i].z=0.00000f; } break; } } /* Convert to RGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetRedPixelComponent(q)); green=ScaleQuantumToMap(GetGreenPixelComponent(q)); blue=ScaleQuantumToMap(GetBluePixelComponent(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; switch (colorspace) { case YCCColorspace: { #if !defined(MAGICKCORE_HDRI_SUPPORT) pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*QuantumScale* pixel.red)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*QuantumScale* pixel.green)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*QuantumScale* pixel.blue)]; #endif break; } case sRGBColorspace: { if ((QuantumScale*pixel.red) <= 0.0031308) pixel.red*=12.92f; else pixel.red=(MagickRealType) QuantumRange*(1.055* pow(QuantumScale*pixel.red,(1.0/2.4))-0.055); if ((QuantumScale*pixel.green) <= 0.0031308) pixel.green*=12.92f; else pixel.green=(MagickRealType) QuantumRange*(1.055* pow(QuantumScale*pixel.green,(1.0/2.4))-0.055); if ((QuantumScale*pixel.blue) <= 0.0031308) pixel.blue*=12.92f; else pixel.blue=(MagickRealType) QuantumRange*(1.055* pow(QuantumScale*pixel.blue,(1.0/2.4))-0.055); break; } default: break; } SetRedPixelComponent(q,ScaleMapToQuantum((MagickRealType) MaxMap* QuantumScale*pixel.red)); SetGreenPixelComponent(q,ScaleMapToQuantum((MagickRealType) MaxMap* QuantumScale*pixel.green)); SetBluePixelComponent(q,ScaleMapToQuantum((MagickRealType) MaxMap* 
QuantumScale*pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformRGBImage) #endif proceed=SetImageProgress(image,TransformRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; switch (colorspace) { case YCCColorspace: { #if !defined(MAGICKCORE_HDRI_SUPPORT) image->colormap[i].red=(Quantum) (QuantumRange*YCCMap[ RoundToYCC(1024.0*QuantumScale*pixel.red)]); image->colormap[i].green=(Quantum) (QuantumRange*YCCMap[ RoundToYCC(1024.0*QuantumScale*pixel.green)]); image->colormap[i].blue=(Quantum) (QuantumRange*YCCMap[ RoundToYCC(1024.0*QuantumScale*pixel.blue)]); #endif break; } case sRGBColorspace: { if ((QuantumScale*pixel.red) <= 0.0031308) pixel.red*=12.92f; else pixel.red=(MagickRealType) QuantumRange*(1.055*pow(QuantumScale* pixel.red,(1.0/2.4))-0.055); if ((QuantumScale*pixel.green) <= 0.0031308) pixel.green*=12.92f; else pixel.green=(MagickRealType) QuantumRange*(1.055*pow(QuantumScale* pixel.green,(1.0/2.4))-0.055); if ((QuantumScale*pixel.blue) <= 0.0031308) pixel.blue*=12.92f; else pixel.blue=(MagickRealType) 
QuantumRange*(1.055*pow(QuantumScale* pixel.blue,(1.0/2.4))-0.055); } default: { image->colormap[i].red=ScaleMapToQuantum((MagickRealType) MaxMap* QuantumScale*pixel.red); image->colormap[i].green=ScaleMapToQuantum((MagickRealType) MaxMap* QuantumScale*pixel.green); image->colormap[i].blue=ScaleMapToQuantum((MagickRealType) MaxMap* QuantumScale*pixel.blue); break; } } } image_view=DestroyCacheView(image_view); (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,RGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
initialize-brisbane.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is a serial C version of the NPB SP code. This C        //
//  version is developed by the Center for Manycore Programming at Seoul   //
//  National University and derived from the serial Fortran versions in    //
//  "NPB3.3-SER" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this C version to cmp@aces.snu.ac.kr  //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header-brisbane.h"

//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values.
//
// Reads globals (declared in header-brisbane.h): grid_points[3],
// dnxm1/dnym1/dnzm1 (inverse mesh spacings), u (the 5-component field),
// mem_u (device-side buffer handle).
// Writes: u on the host, then pushes u to the device.
//---------------------------------------------------------------------
void initialize()
{
  int i, j, k, m, ix, iy, iz;
  // Pface[face_pair][direction][component]: exact solution sampled on the
  // two opposing faces (index 0/1) of each coordinate direction (0..2),
  // for each of the 5 solution components.
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

  //---------------------------------------------------------------------
  // Later (in compute_rhs) we compute 1/u for every element. A few of
  // the corner elements are not used, but it is convenient (and faster)
  // to compute the whole thing with a simple loop. Make sure those
  // values are nonzero by initializing the whole thing here.
  //---------------------------------------------------------------------
  for (k = 0; k <= grid_points[2]-1; k++) {
    for (j = 0; j <= grid_points[1]-1; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        u[0][k][j][i] = 1.0;
        u[1][k][j][i] = 0.0;
        u[2][k][j][i] = 0.0;
        u[3][k][j][i] = 0.0;
        u[4][k][j][i] = 1.0;
      }
    }
  }

  //---------------------------------------------------------------------
  // first store the "interpolated" values everywhere on the grid
  //---------------------------------------------------------------------
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;   // normalized z coordinate in [0,1]
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;  // normalized y coordinate in [0,1]
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)i * dnxm1; // normalized x coordinate in [0,1]

        // Sample the exact solution on the two xi-faces (xi = 0 and 1).
        for (ix = 0; ix < 2; ix++) {
          Pxi = (double)ix;
          exact_solution(Pxi, eta, zeta, &Pface[ix][0][0]);
        }
        // ... on the two eta-faces (eta = 0 and 1).
        for (iy = 0; iy < 2; iy++) {
          Peta = (double)iy;
          exact_solution(xi, Peta, zeta, &Pface[iy][1][0]);
        }
        // ... on the two zeta-faces (zeta = 0 and 1).
        for (iz = 0; iz < 2; iz++) {
          Pzeta = (double)iz;
          exact_solution(xi, eta, Pzeta, &Pface[iz][2][0]);
        }

        // Tri-linear transfinite interpolation (inclusion-exclusion of the
        // three directional linear blends) for each solution component.
        for (m = 0; m < 5; m++) {
          Pxi   = xi   * Pface[1][0][m] + (1.0-xi)   * Pface[0][0][m];
          Peta  = eta  * Pface[1][1][m] + (1.0-eta)  * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m];

          u[m][k][j][i] = Pxi + Peta + Pzeta -
                          Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
                          Pxi*Peta*Pzeta;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // now store the exact values on the boundaries
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // west face
  //---------------------------------------------------------------------
  xi = 0.0;
  i  = 0;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // east face
  //---------------------------------------------------------------------
  xi = 1.0;
  i  = grid_points[0]-1;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (j = 0; j <= grid_points[1]-1; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // south face
  //---------------------------------------------------------------------
  eta = 0.0;
  j   = 0;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)i * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // north face
  //---------------------------------------------------------------------
  eta = 1.0;
  j   = grid_points[1]-1;
  for (k = 0; k <= grid_points[2]-1; k++) {
    zeta = (double)k * dnzm1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)i * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // bottom face
  //---------------------------------------------------------------------
  zeta = 0.0;
  k    = 0;
  for (j = 0; j <= grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)i * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  //---------------------------------------------------------------------
  // top face
  //---------------------------------------------------------------------
  zeta = 1.0;
  k    = grid_points[2]-1;
  for (j = 0; j <= grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (i = 0; i <= grid_points[0]-1; i++) {
      xi = (double)i * dnxm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[m][k][j][i] = temp[m];
      }
    }
  }

  // Push the fully-initialized host field to the device.
  // NOTE(review): both the OpenMP "target update" and the Brisbane h2d
  // transfer below copy u to a device; presumably only one path is active
  // per build configuration — confirm against header-brisbane.h.
#pragma omp target update to(u)
  brisbane_task task0;
  brisbane_task_create(&task0);
  brisbane_task_h2d_full(task0, mem_u, u);
  // Blocking submit (last arg true): returns only after the copy completes.
  brisbane_task_submit(task0, brisbane_default, NULL, true);
}

/*
void lhsinit(int ni, int nj)
{
  int j, m;

  //---------------------------------------------------------------------
  // zap the whole left hand side for starters
  // set all diagonal values to 1. This is overkill, but convenient
  //---------------------------------------------------------------------
  for (j = 1; j <= nj; j++) {
    for (m = 0; m < 5; m++) {
      lhs [j][0][m] = 0.0;
      lhsp[j][0][m] = 0.0;
      lhsm[j][0][m] = 0.0;
      lhs [j][ni][m] = 0.0;
      lhsp[j][ni][m] = 0.0;
      lhsm[j][ni][m] = 0.0;
    }
    lhs [j][0][2] = 1.0;
    lhsp[j][0][2] = 1.0;
    lhsm[j][0][2] = 1.0;
    lhs [j][ni][2] = 1.0;
    lhsp[j][ni][2] = 1.0;
    lhsm[j][ni][2] = 1.0;
  }
}

void lhsinitj(int nj, int ni)
{
  int i, m;

  //---------------------------------------------------------------------
  // zap the whole left hand side for starters
  // set all diagonal values to 1. This is overkill, but convenient
  //---------------------------------------------------------------------
  for (i = 1; i <= ni; i++) {
    for (m = 0; m < 5; m++) {
      lhs [0][i][m] = 0.0;
      lhsp[0][i][m] = 0.0;
      lhsm[0][i][m] = 0.0;
      lhs [nj][i][m] = 0.0;
      lhsp[nj][i][m] = 0.0;
      lhsm[nj][i][m] = 0.0;
    }
    lhs [0][i][2] = 1.0;
    lhsp[0][i][2] = 1.0;
    lhsm[0][i][2] = 1.0;
    lhs [nj][i][2] = 1.0;
    lhsp[nj][i][2] = 1.0;
    lhsm[nj][i][2] = 1.0;
  }
}*/
jinv_ddd_in_h.h
//**************************************************************************************** // // Copyright (c) 2015-2020, Yoshifumi Nakamura <nakamura@riken.jp> // Copyright (c) 2015-2020, Yuta Mukai <mukai.yuta@fujitsu.com> // Copyright (c) 2018-2020, Ken-Ichi Ishikawa <ishikawa@theo.phys.sci.hirosima-u.ac.jp> // Copyright (c) 2019-2020, Issaku Kanamori <kanamori-i@riken.jp> // // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer listed // in this license in the documentation and/or other materials // provided with the distribution. // // * Neither the name of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// //---------------------------------------------------------------------------------------- // ACKNOWLEDGMENT // // This software has been developed in a co-design working group for the lattice QCD // supported by MEXT's programs for the Development and Improvement for the Next // Generation Ultra High-Speed Computer System, under its Subsidies for Operating the // Specific Advanced Large Research Facilities, and Priority Issue 9 // (Elucidation of the Fundamental Laws and Evolution of the Universe) to be tackled by // using the Supercomputer Fugaku. // //**************************************************************************************** #ifndef JINV_DDD_IN_H_H #define JINV_DDD_IN_H_H void jinv_ddd_in_h_(sch_t * __restrict__ x, const sch_t * __restrict__ b, const int *DEO, const int *maxiter) // // Multiply approximate inverse of Wilson/Clover operator in a domain block // // xe = Aee be or xo = Aoo bo // // Aee, Aoo : approximate inverse for (Dee)^-1 and (Doo)^-1, respectively // // x : quark field in a even/odd domain (output) // b : quark field in a even/odd domain (input) // DEO : even/odd block index (0 for even, 1 for odd) // maxiter : Jacobbi tieration count for approximate inverse // { __attribute__((aligned(64))) static sch_t *q; if (q==0) q = (sch_t*)malloc( sizeof(sch_t) * vols); //rvech_t rvd0; //rvd0 = fload1_s((float)2); /////////////////// // q = Ab /////////////////// ddd_in_h_(q, b, DEO); /////////////////// // x = 2 b - q /////////////////// #pragma omp parallel for for(int i=0; i<vols; i++){ for(int j=0; j<24; j++){ for(int v=0; v < VLENS; v++) { x[i].ccs[j].v[v] = 2.0f * b[i].ccs[j].v[v] - q[i].ccs[j].v[v]; } } } ////////////////////////// // Jacobbi iteration ////////////////////////// for (int iter=1; iter<(*maxiter);iter++){ /////////////////// // q = Ax /////////////////// ddd_in_h_(q, x, DEO); /////////////////// // x = x + b - q /////////////////// #pragma omp parallel for for(int i=0; i<vols; i++){ for(int j=0; j<24; j++){ for(int 
v=0; v < VLENS; v++) { x[i].ccs[j].v[v] += b[i].ccs[j].v[v] - q[i].ccs[j].v[v]; } } } }//iter // free(q); } #endif
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"

#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE: mutates *y (the carry is performed in place on y) — callers must
 * not rely on y afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: times TESTS runs of a PLUTO-tiled 7-point variable-
 * coefficient stencil over a (Nx-2)x(Ny-2)x(Nz-2) interior for Nt time steps.
 * Usage: prog Nx Ny Nz Nt (interior sizes; a halo of 1 is added on each side). */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) are read uninitialized below when fewer
   * than 4 (resp. 5) arguments are given — the program requires all four
   * size arguments; there is no usage check. */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;   /* +2: one halo cell on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  // A[2][Nz][Ny][Nx]: double-buffered field (time steps alternate % 2)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // coef[7][Nz][Ny][Nx]: one coefficient array per stencil point
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // (tile sizes 8x8x24x64 match the constants baked into the CLooG code
  //  below; -1 terminates the list).
  // NOTE(review): tile_size is never freed — minor one-shot leak.
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables (deterministic seed left disabled)
  //
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (A glibc <features.h>/<stdc-predef.h> header comment was inlined
     * verbatim here by the source-to-source preprocessing pass; it is an
     * artifact and documents nothing about this benchmark.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Time-tiled (diamond/skewed) traversal generated by PLUTO/CLooG.
     * t5 is the time step; (t6,t7,t8) are skewed space coordinates, so the
     * physical indices are (-t5+t6, -t5+t7, -t5+t8).  Writes go to buffer
     * (t5+1)%2, reads come from t5%2.  The t2 (8-wide z-tile) loop is the
     * parallel dimension. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,4);t1++) {
        lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
        ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-5,6)),ceild(8*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(4*t1+Ny+5,24)),floord(8*t2+Ny+4,24)),floord(8*t1-8*t2+Nz+Ny+3,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(8*t2-Nz-60,64)),ceild(24*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(4*t1+Nx+5,64)),floord(8*t2+Nx+4,64)),floord(24*t3+Nx+20,64)),floord(8*t1-8*t2+Nz+Nx+3,64));t4++) {
              for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),24*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),24*t3+22),64*t4+62),8*t1-8*t2+Nz+5);t5++) {
                for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(64*t4,t5+1);
                    ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    /* 7-point stencil: center + the 6 face neighbors, each
                     * weighted by its own coefficient array. */
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // NOTE(review): the top-level pointers A and coef (and tile_size) are not
  // freed — harmless at exit, but an analyzer will flag them.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
linalg.h
/** * Copyright (c) 2020, Massachusetts Institute of Technology, * Cambridge, MA 02139 * All Rights Reserved * Authors: Jingnan Shi, et al. (see THANKS for the full author list) * See LICENSE for the license information */ #pragma once #include <iostream> #include <Eigen/Core> #include <Eigen/SparseCore> #include <Eigen/Eigenvalues> namespace teaser { /** * Return the hat map of the provided vector (a skew symmetric matrix). * @param u 3-by-1 vector * @param x 3-by-3 skew symmetric matrix */ Eigen::Matrix<double, 3, 3> hatmap(const Eigen::Matrix<double, 3, 1>& u) { Eigen::Matrix<double, 3, 3> x; // clang-format off x << 0, -u(2), u(1), u(2), 0, -u(0), -u(1), u(0), 0; // clang-format on return x; } /** * Vector-vector kronecker product function with fixed-size output * @tparam NumT * @tparam N size of the first vector * @tparam M size of the second vector * @param v1 [in] first vector * @param v2 [in] second vector * @param output [out] output vector */ template <typename NumT, int N, int M> void vectorKron(const Eigen::Matrix<NumT, N, 1>& v1, const Eigen::Matrix<NumT, M, 1>& v2, Eigen::Matrix<NumT, N * M, 1>* output) { #pragma omp parallel for collapse(2) shared(v1, v2, output) default(none) for (size_t i = 0; i < N; ++i) { for (size_t j = 0; j < M; ++j) { (*output)[i * M + j] = v1[i] * v2[j]; } } } /** * Vector-vector kronecker product function with dynamic-size output * @tparam NumT numerical type for Eigen matrices (double, float, etc.) 
* @param v1 [in] first vector * @param v2 [in] second vector * @return Result of kronecker product */ template <typename NumT, int N, int M> Eigen::Matrix<NumT, Eigen::Dynamic, 1> vectorKron(const Eigen::Matrix<NumT, N, 1>& v1, const Eigen::Matrix<NumT, M, 1>& v2) { Eigen::Matrix<double, Eigen::Dynamic, 1> output(v1.rows() * v2.rows(), 1); #pragma omp parallel for collapse(2) shared(v1, v2, output) default(none) for (size_t i = 0; i < v1.rows(); ++i) { for (size_t j = 0; j < v2.rows(); ++j) { output[i * v2.rows() + j] = v1[i] * v2[j]; } } return output; } /** * Find the nearest (in Frobenius norm) Symmetric Positive Definite matrix to A * * See: https://www.sciencedirect.com/science/article/pii/0024379588902236 * * @tparam NumT numerical type for Eigen matrices (double, float, etc.) * @param A [in] input matrix * @param nearestPSD [out] output neaest positive semi-definite matrix * @param eig_threshold [in] optional threshold of determining the smallest eigen values */ template <typename NumT> void getNearestPSD(const Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>& A, Eigen::Matrix<NumT, Eigen::Dynamic, Eigen::Dynamic>* nearestPSD) { assert(A.rows() == A.cols()); nearestPSD->resize(A.rows(), A.cols()); // symmetrize A into B Eigen::MatrixXd B = (A + A.transpose()) / 2; // eigendecomposition of B Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> eig_B(B); Eigen::VectorXd De = eig_B.eigenvalues(); Eigen::MatrixXd De_positive = (De.array() < 0).select(0, De).asDiagonal(); Eigen::MatrixXd Ve = eig_B.eigenvectors(); *nearestPSD = Ve * De_positive * Ve.transpose(); } } // namespace teaser
tree.h
#ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/meta.h> #include <LightGBM/dataset.h> #ifdef USE_PROTO #include "model.pb.h" #endif // USE_PROTO #include <string> #include <vector> #include <memory> namespace LightGBM { #define kMaxTreeOutput (100) #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves */ explicit Tree(int max_leaves); /*! * \brief Construtor, from a string * \param str Model string */ explicit Tree(const std::string& str); #ifdef USE_PROTO /*! * \brief Construtor, from a protobuf object * \param model_tree Model protobuf object */ explicit Tree(const Model_Tree& model_tree); #endif // USE_PROTO ~Tree(); /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain, MissingType missing_type, bool default_left); /*! 
* \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = output; } /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! * \brief Adding prediction value of this tree model to scorese * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! 
* \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! * \brief Shrinkage for the tree's output * shrinkage rate (a.k.a learning rate) is used to tune the traning process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] *= rate; if (leaf_value_[i] > kMaxTreeOutput) { leaf_value_[i] = kMaxTreeOutput; } else if (leaf_value_[i] < -kMaxTreeOutput) { leaf_value_[i] = -kMaxTreeOutput; } } shrinkage_ *= rate; } inline void AddBias(double val) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] = val + leaf_value_[i]; } // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! 
\brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool is_predict_leaf_index) const; #ifdef USE_PROTO /*! \brief Serialize this object to protobuf object*/ void ToProto(Model_Tree& model_tree) const; #endif // USE_PROTO inline static bool IsZero(double fval) { if (fval > -kZeroAsMissingValueRange && fval <= kZeroAsMissingValueRange) { return true; } else { return false; } } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval)) { if (missing_type != 2) { fval = 0.0f; } } if ((missing_type == 1 && IsZero(fval)) || (missing_type == 2 && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == 1 && fval == default_bin) || (missing_type == 2 && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_in_bin_[node]) { return left_child_[node]; } else { 
return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node];; } else if (std::isnan(fval)) { // NaN is always in the right if (missing_type == 2) { return right_child_[node]; } int_fval = 0; } int cat_idx = int(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = int(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain); /*! * \brief Find leaf index of which record belongs by features * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; /*! \brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! 
\brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool is_predict_leaf_index) const; double ExpectedValue() const; int MaxDepth(); /*! \brief This is used fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permuation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (https://arxiv.org/abs/1706.06060) */ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*! \brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! determine what the total permuation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current levas*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! 
\brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and mising value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<double> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief DataCount of leaves */ std::vector<data_size_t> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief DataCount of non-leaf nodes */ std::vector<data_size_t> internal_count_; /*! 
\brief Depth for leaves */ std::vector<int> leaf_depth_; double shrinkage_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = Common::AvoidInf(gain); // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ? 
0.0f : right_value; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { const int max_path_len = MaxDepth()+1; PathElement *unique_path_data = new PathElement[(max_path_len*(max_path_len+1))/2]; TreeSHAP(feature_values, output, 0, 0, unique_path_data, 1, 1, -1); delete[] unique_path_data; } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth+1); RecomputeLeafDepths(right_child_[node], depth+1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
convolutiondepthwise_5x5_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw5x5s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { #if __aarch64__ const int w = bottom_blob.w; #endif const int outw = top_blob.w; const int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const unsigned short* kptr = kernel.row<const unsigned short>(g); unsigned short* outptr0 = out.row<unsigned short>(0); const Mat img0 = bottom_blob.channel(g); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* r3 = img0.row<const unsigned short>(3); const unsigned short* r4 = img0.row<const unsigned short>(4); #if __aarch64__ unsigned short* outptr1 = out.row<unsigned short>(1); const unsigned short* r5 = img0.row<const unsigned short>(5); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); // 4 * 25 uint16x8_t _k00_01 = vld1q_u16(kptr); uint16x8_t _k02_03 = vld1q_u16(kptr + 8); uint16x8_t _k04_10 = vld1q_u16(kptr + 16); uint16x8_t _k11_12 = vld1q_u16(kptr + 24); uint16x8_t _k13_14 = vld1q_u16(kptr + 32); uint16x8_t _k20_21 = vld1q_u16(kptr + 40); uint16x8_t _k22_23 = vld1q_u16(kptr + 48); uint16x8_t _k24_30 = vld1q_u16(kptr + 56); uint16x8_t _k31_32 = vld1q_u16(kptr + 64); uint16x8_t _k33_34 = vld1q_u16(kptr + 72); uint16x8_t _k40_41 = vld1q_u16(kptr + 80); uint16x8_t _k42_43 = vld1q_u16(kptr + 88); uint16x4_t _k44 = vld1_u16(kptr + 96); #else // __aarch64__ float bias0_data[4]; if (bias) { bias0_data[0] = bias[g * 4 + 0]; bias0_data[1] = bias[g * 4 + 1]; bias0_data[2] = bias[g * 4 + 2]; bias0_data[3] = bias[g * 4 + 3]; } else { bias0_data[0] = 0.f; bias0_data[1] = 0.f; bias0_data[2] = 0.f; bias0_data[3] = 0.f; } const float* bias0_data_ptr = bias0_data; #endif // __aarch64__ int i = 0; #if __aarch64__ for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r10 r11 r12 r13 "shll2 v14.4s, %18.8h, #16 \n" "mov v24.16b, %29.16b \n" // sum00 "mov v25.16b, %29.16b \n" // sum01 "mov v26.16b, %29.16b \n" // sum02 "mov v27.16b, %29.16b \n" // sum03 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "mov v28.16b, %29.16b \n" // sum10 "mov v29.16b, %29.16b \n" // sum11 "mov v30.16b, %29.16b \n" // sum12 "mov v31.16b, %29.16b \n" // sum13 "shll v15.4s, %16.4h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3] \n" // r14 r15 r16 r17 "fmla v27.4s, v14.4s, v19.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "fmla v29.4s, v15.4s, v17.4s \n" "fmla v30.4s, v15.4s, v18.4s 
\n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v24.4s, v14.4s, v17.4s \n" "fmla v25.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v14.4s, v19.4s \n" "fmla v27.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %19.8h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll v15.4s, %17.4h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll v14.4s, %20.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v24.4s, v14.4s, v19.4s \n" "fmla v25.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v14.4s, v21.4s \n" "fmla v27.4s, v14.4s, v22.4s \n" "shll2 v14.4s, %20.8h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r20 r21 r22 r23 "fmla v31.4s, v15.4s, v22.4s \n" "shll v15.4s, %18.4h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r24 r25 r26 r27 "fmla v27.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %21.8h, #16 \n" "fmla v28.4s, v15.4s, 
v16.4s \n" "fmla v29.4s, v15.4s, v17.4s \n" "fmla v30.4s, v15.4s, v18.4s \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %19.4h, #16 \n" "fmla v24.4s, v14.4s, v17.4s \n" "fmla v25.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v14.4s, v19.4s \n" "fmla v27.4s, v14.4s, v20.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll2 v14.4s, %22.8h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll v15.4s, %20.4h, #16 \n" "fmla v24.4s, v14.4s, v19.4s \n" "fmla v25.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v14.4s, v21.4s \n" "fmla v27.4s, v14.4s, v22.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r30 r31 r32 r33 "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll2 v14.4s, %23.8h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll v15.4s, %21.4h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5] \n" // r34 r35 r36 r37 "fmla v27.4s, 
v14.4s, v19.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "fmla v29.4s, v15.4s, v17.4s \n" "fmla v30.4s, v15.4s, v18.4s \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v24.4s, v14.4s, v17.4s \n" "fmla v25.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v14.4s, v19.4s \n" "fmla v27.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %24.8h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll v15.4s, %22.4h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll v14.4s, %25.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %22.8h, #16 \n" "fmla v24.4s, v14.4s, v19.4s \n" "fmla v25.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v14.4s, v21.4s \n" "fmla v27.4s, v14.4s, v22.4s \n" "shll2 v14.4s, %25.8h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" // r40 r41 r42 r43 "fmla v31.4s, v15.4s, v22.4s \n" "shll v15.4s, %23.4h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll v14.4s, %26.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%6, #256] \n" "ld1 
{v20.4h, v21.4h, v22.4h, v23.4h}, [%6] \n" // r44 r45 r46 r47 "fmla v27.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %26.8h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "fmla v29.4s, v15.4s, v17.4s \n" "fmla v30.4s, v15.4s, v18.4s \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %24.4h, #16 \n" "fmla v24.4s, v14.4s, v17.4s \n" "fmla v25.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v14.4s, v19.4s \n" "fmla v27.4s, v14.4s, v20.4s \n" "shll v14.4s, %27.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %24.8h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll2 v14.4s, %27.8h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll v15.4s, %25.4h, #16 \n" "fmla v24.4s, v14.4s, v19.4s \n" "fmla v25.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v14.4s, v21.4s \n" "fmla v27.4s, v14.4s, v22.4s \n" "shll v14.4s, %28.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r00 r01 r02 r03 "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %25.8h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v15.4s, v21.4s \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v24.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v25.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" 
"fmla v26.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r04 r05 r06 r07 "fmla v27.4s, v14.4s, v19.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v24.4s, v15.4s, v17.4s \n" "fmla v25.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v26.4s, v15.4s, v19.4s \n" "fmla v27.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v24.4s, v14.4s, v18.4s \n" "fmla v25.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v26.4s, v14.4s, v20.4s \n" "fmla v27.4s, v14.4s, v21.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v24.4s, v15.4s, v19.4s \n" "fmla v25.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v26.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" // r50 r51 r52 r53 "fmla v27.4s, v15.4s, v22.4s \n" "shll v15.4s, %26.4h, #16 \n" "fmla v24.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v25.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v26.4s, v14.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v27.4s, v14.4s, v23.4s \n" "shll2 v14.4s, %26.8h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%7] \n" // r54 r55 r56 r57 "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %27.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "fmla v31.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %27.8h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll v15.4s, %28.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "fmla v31.4s, v14.4s, v22.4s \n" 
"fmla v28.4s, v15.4s, v20.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "fmla v31.4s, v15.4s, v23.4s \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k00_01), // %16 "w"(_k02_03), // %17 "w"(_k04_10), // %18 "w"(_k11_12), // %19 "w"(_k13_14), // %20 "w"(_k20_21), // %21 "w"(_k22_23), // %22 "w"(_k24_30), // %23 "w"(_k31_32), // %24 "w"(_k33_34), // %25 "w"(_k40_41), // %26 "w"(_k42_43), // %27 "w"(_k44), // %28 "w"(_bias0) // %29 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%3, #128] \n" "ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r10 r11 "shll2 v14.4s, %18.8h, #16 \n" "mov v28.16b, %29.16b \n" // sum00 "mov v29.16b, %29.16b \n" // sum01 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "mov v30.16b, %29.16b \n" // sum10 "mov v31.16b, %29.16b \n" // sum11 "prfm pldl1keep, [%3, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%3] \n" // r12 r13 r14 r15 "fmla v28.4s, v14.4s, v16.4s \n" "shll v15.4s, %16.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %19.8h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" 
"shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll v15.4s, %17.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r20 r21 "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %20.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %20.8h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll v15.4s, %18.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%4] \n" // r22 r23 r24 r25 "fmla v28.4s, v14.4s, v16.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll2 v14.4s, %21.8h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll v15.4s, %19.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r30 r31 "fmla v29.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %22.8h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %20.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, 
#16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll2 v14.4s, %23.8h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%5] \n" // r32 r33 r34 r35 "fmla v28.4s, v14.4s, v16.4s \n" "shll v15.4s, %21.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %24.8h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll v15.4s, %22.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v16.4h, v17.4h}, [%6], #16 \n" // r40 r41 "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %25.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %22.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %25.8h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll v15.4s, %23.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %26.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%6] \n" // r42 r43 r44 r45 "fmla v28.4s, v14.4s, v16.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll2 v14.4s, %26.8h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll v15.4s, %24.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %27.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 
\n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %24.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r00 r01 "fmla v29.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %27.8h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %25.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v22.4h, v23.4h}, [%7], #16 \n" // r50 r51 "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %28.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %25.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%2] \n" // r02 r03 r04 r05 "shll v23.4s, v23.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v15.4s, %26.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7] \n" // r52 r53 r54 r55 "shll2 v14.4s, %16.8h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %26.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v30.4s, v15.4s, v23.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v24.4s \n" "shll v15.4s, %27.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %17.8h, #16 \n" "fmla v30.4s, v15.4s, v24.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v25.4s \n" "shll2 v15.4s, %27.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v26.4s, v26.4h, #16 \n" "fmla v29.4s, v14.4s, 
v20.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v30.4s, v15.4s, v25.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v26.4s \n" "shll v15.4s, %28.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v27.4s, v27.4h, #16 \n" "fmla v30.4s, v15.4s, v26.4s \n" "fmla v31.4s, v15.4s, v27.4s \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v28.4h, v29.4h}, [%0], #16 \n" "st1 {v30.4h, v31.4h}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k00_01), // %16 "w"(_k02_03), // %17 "w"(_k04_10), // %18 "w"(_k11_12), // %19 "w"(_k13_14), // %20 "w"(_k20_21), // %21 "w"(_k22_23), // %22 "w"(_k24_30), // %23 "w"(_k31_32), // %24 "w"(_k33_34), // %25 "w"(_k40_41), // %26 "w"(_k42_43), // %27 "w"(_k44), // %28 "w"(_bias0) // %29 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%3, #64] \n" "ld1 {v16.4h}, [%3], #8 \n" // r10 "prfm pldl1keep, [%3, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%3] \n" // r11 r12 r13 r14 "mov v30.16b, %29.16b \n" // sum00 "mov v31.16b, %29.16b \n" // sum10 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "shll2 v14.4s, %18.8h, #16 \n" "shll v15.4s, %16.4h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmul v29.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll2 v14.4s, %19.8h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll v15.4s, %17.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v14.4s, %20.4h, #16 \n" "fmla v29.4s, v15.4s, 
v18.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %20.8h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %18.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v29.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v16.4h}, [%4], #8 \n" // r20 "prfm pldl1keep, [%4, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%4] \n" // r21 r22 r23 r24 "shll2 v15.4s, %18.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v16.4s \n" "shll2 v14.4s, %21.8h, #16 \n" "fmla v31.4s, v15.4s, v16.4s \n" "shll v15.4s, %19.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %22.8h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll v15.4s, %20.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll2 v14.4s, %23.8h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v16.4h}, [%5], #8 \n" // r30 "prfm pldl1keep, [%5, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%5] \n" // r31 r32 r33 r34 "shll v15.4s, %21.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v29.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll2 v14.4s, %24.8h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll v15.4s, %22.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v14.4s, %25.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %22.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll2 v14.4s, %25.8h, 
#16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll v15.4s, %23.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v14.4s, %26.4h, #16 \n" "fmla v29.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%6, #64] \n" "ld1 {v16.4h}, [%6], #8 \n" // r40 "prfm pldl1keep, [%6, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%6] \n" // r41 r42 r43 r44 "shll2 v15.4s, %23.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v16.4s \n" "shll2 v14.4s, %26.8h, #16 \n" "fmla v31.4s, v15.4s, v16.4s \n" "shll v15.4s, %24.4h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v14.4s, %27.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %24.8h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %27.8h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll v15.4s, %25.4h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v14.4s, %28.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %25.8h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v16.4h}, [%2], #8 \n" // r00 "prfm pldl1keep, [%7, #64] \n" "ld1 {v21.4h}, [%7], #8 \n" // r50 "prfm pldl1keep, [%7, #256] \n" "ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [%7] \n" // r51 r52 r53 r54 "prfm pldl1keep, [%2, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%2] \n" // r01 r02 r03 r04 "shll v15.4s, %26.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll2 v14.4s, %16.8h, #16 \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %26.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s 
\n" "shll v15.4s, %27.4h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll2 v14.4s, %17.8h, #16 \n" "fmla v29.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %27.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v31.4s, v15.4s, v24.4s \n" "shll v15.4s, %28.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v15.4s, v25.4s \n" "fadd v30.4s, v30.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v30.4h}, [%0], #8 \n" "st1 {v31.4h}, [%1], #8 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k00_01), // %16 "w"(_k02_03), // %17 "w"(_k04_10), // %18 "w"(_k11_12), // %19 "w"(_k13_14), // %20 "w"(_k20_21), // %21 "w"(_k22_23), // %22 "w"(_k24_30), // %23 "w"(_k31_32), // %24 "w"(_k33_34), // %25 "w"(_k40_41), // %26 "w"(_k42_43), // %27 "w"(_k44), // %28 "w"(_bias0) // %29 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } r0 += 4 * 4 + w * 4; r1 += 4 * 4 + w * 4; r2 += 4 * 4 + w * 4; r3 += 4 * 4 + w * 4; r4 += 4 * 4 + w * 4; r5 += 4 * 4 + w * 4; outptr0 += outw * 4; outptr1 += outw * 4; } #endif // __aarch64__ for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03 "shll v14.4s, %12.4h, #16 \n" "mov v28.16b, %25.16b \n" // sum00 "mov v29.16b, %25.16b \n" // sum01 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "mov v30.16b, %25.16b \n" // sum02 "mov v31.16b, %25.16b \n" // sum03 "shll2 v15.4s, %12.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v14.4s, v18.4s 
\n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1] \n" // r04 r05 r06 r07 "fmla v31.4s, v14.4s, v19.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "fmla v31.4s, v14.4s, v21.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13 "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v14.4s, v23.4s \n" "shll v14.4s, %15.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r14 r15 r16 r17 "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "fmla v31.4s, v14.4s, v20.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, 
v19.4h}, [%3], #32 \n" // r20 r21 r22 r23 "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %17.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3] \n" // r24 r25 r26 r27 "fmla v31.4s, v14.4s, v19.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "fmla v31.4s, v14.4s, v21.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33 "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v14.4s, v23.4s \n" "shll v14.4s, %20.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r34 r35 r36 r37 "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "fmla v29.4s, v14.4s, v18.4s 
\n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "fmla v31.4s, v14.4s, v20.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43 "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %22.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5] \n" // r44 r45 r46 r47 "fmla v31.4s, v14.4s, v19.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "fmla v31.4s, v14.4s, v21.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "fmla v31.4s, v15.4s, v22.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "fmla v31.4s, v14.4s, v23.4s \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn 
v31.4h, v31.4s, #16 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vmov q13, q12 \n" // sum0 sum1 "vmov q14, q12 \n" "vshll.u16 q9, d21, #16 \n" // k01 "vmov q15, q12 \n" // sum2 sum3 "vmla.f32 q12, q8, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q8, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, q2 \n" "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2 :64] \n" // r04 r05 r06 r07 "vmla.f32 q15, q8, q3 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmla.f32 q12, q9, q1 \n" "vmla.f32 q13, q9, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q9, q3 \n" "vmla.f32 q15, q9, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmla.f32 q12, q10, q2 \n" "vmla.f32 q13, q10, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q5 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vmla.f32 q13, q11, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q11, q5 \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! 
\n" // r10 r11 r12 r13 "vmla.f32 q15, q11, q6 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q12, q10, q4 \n" "vshll.u16 q0, d4, #16 \n" "vmla.f32 q13, q10, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q10, q6 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q15, q10, q7 \n" "vshll.u16 q8, d18, #16 \n" // k11 "vmla.f32 q12, q11, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q11, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q11, q2 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3 :64] \n" // r14 r15 r16 r17 "vmla.f32 q15, q11, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q12, q8, q1 \n" "vmla.f32 q13, q8, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q8, q3 \n" "vmla.f32 q15, q8, q4 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q2 \n" "vmla.f32 q13, q9, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q9, q4 \n" "vmla.f32 q15, q9, q5 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q12, q8, q3 \n" "vmla.f32 q13, q8, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q8, q5 \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23 "vmla.f32 q15, q8, q6 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q12, q9, q4 \n" "vshll.u16 q0, d4, #16 \n" "vmla.f32 q13, q9, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q9, q6 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q15, q9, q7 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "vshll.u16 q11, d23, #16 \n" // k21 "vmla.f32 q12, q10, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q10, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, q2 \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64] \n" // r24 r25 r26 r27 "vmla.f32 q15, q10, q3 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "vmla.f32 q13, q11, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q11, q3 \n" "vmla.f32 q15, q11, q4 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q12, q10, q2 \n" "vmla.f32 q13, q10, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q5 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q12, q11, q3 \n" "vmla.f32 q13, q11, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q11, q5 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33 "vmla.f32 q15, q11, q6 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q12, q8, q4 \n" "vshll.u16 q0, d4, #16 \n" "vmla.f32 q13, q8, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q8, q6 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q15, q8, q7 \n" "vshll.u16 q8, d20, #16 \n" // k31 "vmla.f32 q12, q9, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q9, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q9, q2 \n" "pld [%5, #256] \n" "vld1.u16 {d12-d15}, [%5 :64] \n" // r34 r35 r36 r37 "vmla.f32 q15, q9, q3 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q12, q8, q1 \n" "vmla.f32 q13, q8, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q8, q3 \n" "vmla.f32 q15, q8, q4 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q12, q9, q2 \n" "vmla.f32 q13, q9, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q9, q4 \n" "vmla.f32 q15, q9, q5 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q12, q10, q3 \n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q13, q10, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q10, q5 \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! 
\n" // r40 r41 r42 r43 "vmla.f32 q15, q10, q6 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q4 \n" "vshll.u16 q0, d4, #16 \n" "vmla.f32 q13, q11, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, q6 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q15, q11, q7 \n" "vshll.u16 q11, d17, #16 \n" // k41 "vmla.f32 q12, q10, q0 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q13, q10, q1 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, q2 \n" "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6 :64] \n" // r44 r45 r46 r47 "vmla.f32 q15, q10, q3 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q12, q11, q1 \n" "vmla.f32 q13, q11, q2 \n" "vshll.u16 q4, d12, #16 \n" "vmla.f32 q14, q11, q3 \n" "vmla.f32 q15, q11, q4 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vmla.f32 q12, q8, q2 \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q13, q8, q3 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q14, q8, q4 \n" "vmla.f32 q15, q8, q5 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q9, q4 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q9, q5 \n" "vmla.f32 q15, q9, q6 \n" "vmla.f32 q12, q8, q4 \n" "vmla.f32 q13, q8, q5 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q8, q6 \n" "vmla.f32 q15, q8, q7 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d26, q14, #16 \n" "vshrn.u32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v16.4h, v17.4h}, [%1], #16 \n" // r00 r01 "prfm pldl1keep, [%1, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%1] \n" // r02 r03 r04 r05 "shll v14.4s, %12.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "mov v30.16b, %25.16b \n" // sum01 "mov v31.16b, %25.16b \n" // sum02 "shll2 v15.4s, %12.8h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmul v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r10 r11 "shll2 v15.4s, %13.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%2] \n" // r12 r13 r14 r15 "shll v14.4s, %15.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r20 r21 "shll v14.4s, %16.4h, #16 \n" "fmla 
v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%3] \n" // r22 r23 r24 r25 "shll2 v15.4s, %17.8h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r30 r31 "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%4] \n" // r32 r33 r34 r35 "shll v14.4s, %20.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r40 r41 "shll v14.4s, %21.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla 
v31.4s, v15.4s, v21.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%5] \n" // r42 r43 r44 r45 "shll2 v15.4s, %22.8h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v29.4s, v14.4s, v17.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v21.4s \n" "fadd v30.4s, v30.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v30.4h, v31.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! 
\n" // r00 r01 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" "pld [%2, #256] \n" "vld1.u16 {d8-d11}, [%2 :64] \n" // r02 r03 r04 r05 "vshll.u16 q0, d2, #16 \n" "vmov q13, q12 \n" // sum0 sum1 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q9, d21, #16 \n" // k01 "vmul.f32 q14, q8, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmul.f32 q15, q8, q1 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmla.f32 q12, q9, q1 \n" "pld [%3, #128] \n" "vld1.u16 {d2-d3}, [%3 :64]! \n" // r10 r11 "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q9, q2 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmla.f32 q14, q10, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q15, q10, q3 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, q4 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q14, q10, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q15, q10, q5 \n" "pld [%3, #256] \n" "vld1.u16 {d8-d11}, [%3 :64] \n" // r12 r13 r14 r15 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q8, d18, #16 \n" // k11 "vmla.f32 q12, q11, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q11, q1 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q14, q8, q1 \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21 "vshll.u16 q3, d9, #16 \n" "vmla.f32 q15, q8, q2 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, q3 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q14, q8, q3 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q15, q8, q4 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q12, q9, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q9, q5 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "pld [%4, #256] \n" "vld1.u16 {d8-d11}, [%4 :64] \n" // r22 r23 r24 r25 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q11, d23, #16 \n" // k21 "vmla.f32 q14, q10, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q15, q10, q1 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "pld [%5, #128] \n" "vld1.u16 {d2-d3}, [%5 :64]! \n" // r30 r31 "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q11, q2 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q14, q10, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q15, q10, q3 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q14, q8, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q15, q8, q5 \n" "pld [%5, #256] \n" "vld1.u16 {d8-d11}, [%5 :64] \n" // r32 r33 r34 r35 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q8, d20, #16 \n" // k31 "vmla.f32 q12, q9, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q13, q9, q1 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q14, q8, q1 \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41 "vshll.u16 q3, d9, #16 \n" "vmla.f32 q15, q8, q2 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q14, q10, q3 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q15, q10, q4 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q13, q11, q5 \n" "pld [%6, #256] \n" "vld1.u16 {d8-d11}, [%6 :64] \n" // r42 r43 r44 r45 "vshll.u16 q1, d3, #16 \n" "vshll.u16 q11, d17, #16 \n" // k41 "vmla.f32 q14, q10, q0 \n" "vshll.u16 q2, d8, #16 \n" "vmla.f32 q15, q10, q1 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q12, q11, q1 \n" "vshll.u16 q3, d9, #16 \n" "vmla.f32 q13, q11, q2 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q14, q8, q2 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q15, q8, q3 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q9, q4 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q14, q8, q4 \n" "vmla.f32 q15, q8, q5 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vst1.u16 {d24-d25}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v16.4h}, [%1], #8 \n" // r00 "prfm pldl1keep, [%1, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%1] \n" // r01 r02 r03 r04 "shll v14.4s, %12.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "mov v31.16b, %25.16b \n" // sum01 "shll2 v15.4s, %12.8h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmul v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmul v30.4s, v14.4s, v18.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v16.4h}, [%2], #8 \n" // r10 "prfm pldl1keep, [%2, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%2] \n" // r11 r12 r13 r14 "shll v14.4s, %15.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v29.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v16.4h}, [%3], #8 \n" // r20 "prfm pldl1keep, [%3, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%3] \n" // r21 r22 r23 r24 "shll2 v15.4s, %17.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll 
v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v16.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v16.4h}, [%4], #8 \n" // r30 "prfm pldl1keep, [%4, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%4] \n" // r31 r32 r33 r34 "shll v14.4s, %20.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v16.4h}, [%5], #8 \n" // r40 "prfm pldl1keep, [%5, #256] \n" "ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%5] \n" // r41 r42 r43 r44 "shll2 v15.4s, %22.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fadd v29.4s, v29.4s, v30.4s \n" "fadd v31.4s, v31.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v31.4h}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 
"w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" // sum0 "pld [%2, #64] \n" "vld1.u16 {d1}, [%2 :64]! \n" // r00 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%2, #256] \n" "vld1.u16 {d6-d9}, [%2 :64] \n" // r01 r02 r03 r04 "vshll.u16 q0, d1, #16 \n" "vshll.u16 q9, d21, #16 \n" // k01 "vshll.u16 q1, d6, #16 \n" "vmul.f32 q13, q8, q0 \n" "pld [%3, #64] \n" "vld1.u16 {d1}, [%3 :64]! \n" // r10 "vshll.u16 q2, d7, #16 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmul.f32 q14, q9, q1 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmul.f32 q15, q10, q2 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q13, q10, q4 \n" "pld [%3, #256] \n" "vld1.u16 {d6-d9}, [%3 :64] \n" // r11 r12 r13 r14 "vshll.u16 q8, d18, #16 \n" // k11 "vshll.u16 q1, d6, #16 \n" "vmla.f32 q14, q11, q0 \n" "pld [%4, #64] \n" "vld1.u16 {d1}, [%4 :64]! \n" // r20 "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q15, q8, q1 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q13, q8, q3 \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q14, q9, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "pld [%4, #256] \n" "vld1.u16 {d6-d9}, [%4 :64] \n" // r21 r22 r23 r24 "vshll.u16 q11, d23, #16 \n" // k21 "vshll.u16 q1, d6, #16 \n" "vmla.f32 q15, q10, q0 \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! \n" // r30 "vshll.u16 q2, d7, #16 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q13, q10, q2 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q14, q11, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q15, q8, q4 \n" "pld [%5, #256] \n" "vld1.u16 {d6-d9}, [%5 :64] \n" // r31 r32 r33 r34 "vshll.u16 q8, d20, #16 \n" // k31 "vshll.u16 q1, d6, #16 \n" "vmla.f32 q12, q9, q0 \n" "pld [%6, #64] \n" "vld1.u16 {d1}, [%6 :64]! \n" // r40 "vshll.u16 q2, d7, #16 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q13, q8, q1 \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q14, q9, q2 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q15, q10, q3 \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q4 \n" "pld [%6, #256] \n" "vld1.u16 {d6-d9}, [%6 :64] \n" // r41 r42 r43 r44 "vshll.u16 q11, d17, #16 \n" // k41 "vshll.u16 q1, d6, #16 \n" "vmla.f32 q13, q10, q0 \n" "vshll.u16 q2, d7, #16 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q14, q11, q1 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vshll.u16 q3, d8, #16 \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q15, q8, q2 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q8, q4 \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q12, q12, q13 \n" "vadd.f32 q12, q12, q14 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vshrn.u32 d24, q12, #16 \n" "vst1.u16 {d24}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += 4 * 4; r1 += 4 * 4; r2 += 4 * 4; r3 += 4 * 4; r4 += 4 * 4; } } } static void convdw5x5s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); #if __aarch64__ float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); #endif // __aarch64__ const unsigned short* kptr = kernel.row<const unsigned short>(g); unsigned short* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); const unsigned short* r3 = img0.row<const unsigned short>(3); const unsigned short* r4 = img0.row<const unsigned short>(4); #if __aarch64__ // 4 * 25 uint16x8_t _k00_01 = vld1q_u16(kptr); uint16x8_t _k02_03 = vld1q_u16(kptr + 8); uint16x8_t _k04_10 = vld1q_u16(kptr + 16); uint16x8_t _k11_12 = vld1q_u16(kptr + 24); uint16x8_t _k13_14 = vld1q_u16(kptr + 32); uint16x8_t _k20_21 = vld1q_u16(kptr + 40); uint16x8_t _k22_23 = vld1q_u16(kptr + 48); uint16x8_t _k24_30 = vld1q_u16(kptr + 56); uint16x8_t _k31_32 = vld1q_u16(kptr + 64); uint16x8_t _k33_34 = vld1q_u16(kptr + 72); uint16x8_t _k40_41 = vld1q_u16(kptr + 80); uint16x8_t _k42_43 = vld1q_u16(kptr + 88); 
uint16x4_t _k44 = vld1_u16(kptr + 96); #else // __aarch64__ float bias0_data[4]; if (bias) { bias0_data[0] = bias[g * 4 + 0]; bias0_data[1] = bias[g * 4 + 1]; bias0_data[2] = bias[g * 4 + 2]; bias0_data[3] = bias[g * 4 + 3]; } else { bias0_data[0] = 0.f; bias0_data[1] = 0.f; bias0_data[2] = 0.f; bias0_data[3] = 0.f; } const float* bias0_data_ptr = bias0_data; #endif // __aarch64__ int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n" // r04 r05 r06 r07 "shll v14.4s, %12.4h, #16 \n" "mov v28.16b, %25.16b \n" // sum00 "mov v29.16b, %25.16b \n" // sum01 "mov v30.16b, %25.16b \n" // sum02 "mov v31.16b, %25.16b \n" // sum03 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "prfm pldl1keep, [%1, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%1] \n" // r08 r09 r010 "shll2 v15.4s, %12.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "fmla v31.4s, v14.4s, v24.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13 "fmla v29.4s, v15.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v15.4s, v23.4s \n" "shll v16.4s, v16.4h, 
#16 \n" "fmla v31.4s, v15.4s, v25.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v14.4s, v22.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r14 r15 r16 r17 "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v14.4s, v24.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v14.4s, v26.4s \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%2] \n" // r18 r19 r110 "shll v14.4s, %15.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "fmla v31.4s, v14.4s, v23.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s \n" "fmla v31.4s, v15.4s, v24.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r20 r21 r22 r23 "fmla v29.4s, v14.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v14.4s, v23.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v14.4s, v25.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v15.4s, v22.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" // r24 r25 r26 r27 "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v15.4s, v24.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v26.4s \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%3] \n" // r28 r29 r210 "shll2 v15.4s, %17.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll 
v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "fmla v31.4s, v14.4s, v24.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33 "fmla v29.4s, v15.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v15.4s, v23.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v25.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v14.4s, v22.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4], #32 \n" // r34 r35 r36 r37 "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v14.4s, v24.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v14.4s, v26.4s \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%4] \n" // r38 r39 r310 "shll v14.4s, %20.4h, #16 \n" "fmla v28.4s, v15.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v14.4s, v21.4s \n" "fmla v31.4s, v14.4s, v23.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v28.4s, v15.4s, v18.4s \n" "fmla v29.4s, v15.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v15.4s, v22.4s 
\n" "fmla v31.4s, v15.4s, v24.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43 "fmla v29.4s, v14.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v14.4s, v23.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v14.4s, v25.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v28.4s, v15.4s, v20.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v29.4s, v15.4s, v22.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n" // r44 r45 r46 r47 "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v15.4s, v24.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v31.4s, v15.4s, v26.4s \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%5] \n" // r48 r49 r410 "shll2 v15.4s, %22.8h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v14.4s, v22.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v28.4s, v15.4s, v17.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v30.4s, v15.4s, v21.4s \n" "fmla v31.4s, v15.4s, v23.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v30.4s, v14.4s, v22.4s \n" "fmla v31.4s, v14.4s, v24.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v28.4s, v15.4s, v19.4s \n" "fmla v29.4s, v15.4s, v21.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v30.4s, v15.4s, v23.4s \n" "fmla v31.4s, v15.4s, v25.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v22.4s \n" "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, v14.4s, v24.4s \n" "fmla v31.4s, v14.4s, v26.4s \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" "vmov q13, q12 \n" // sum0 sum1 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03 "vmov q14, q12 \n" "vmov q15, q12 \n" // sum2 sum3 "vshll.u16 q9, d21, #16 \n" // k01 "pld [%2, #256] \n" "vld1.u16 {d12-d15}, [%2 :64]! \n" // r04 r05 r06 r07 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q12, q8, q0 \n" "vmla.f32 q13, q8, q2 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q8, q4 \n" "vmla.f32 q15, q8, q6 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmla.f32 q12, q9, q1 \n" "vmla.f32 q13, q9, q3 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q9, q5 \n" "vmla.f32 q15, q9, q7 \n" "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! \n" // r08 r09 "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "vmla.f32 q12, q10, q2 \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmla.f32 q13, q10, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q14, q10, q6 \n" "vmla.f32 q15, q10, q0 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vmla.f32 q13, q11, q5 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q14, q11, q7 \n" "vmla.f32 q15, q11, q1 \n" "pld [%2, #64] \n" "vld1.u16 {d5}, [%2 :64] \n" // r010 "vmla.f32 q12, q10, q4 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q13, q10, q6 \n" "vshll.u16 q2, d5, #16 \n" "vmla.f32 q14, q10, q0 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3 :64]! \n" // r10 r11 r12 r13 "vmla.f32 q15, q10, q2 \n" "vshll.u16 q8, d18, #16 \n" // k11 "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n" // r14 r15 r16 r17 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q12, q11, q4 \n" "vmla.f32 q13, q11, q6 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q14, q11, q0 \n" "vmla.f32 q15, q11, q2 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vmla.f32 q12, q8, q5 \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q13, q8, q7 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, q1 \n" "vmla.f32 q15, q8, q3 \n" "pld [%3, #128] \n" "vld1.u16 {d10-d11}, [%3 :64]! \n" // r18 r19 "vmla.f32 q12, q9, q6 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q13, q9, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q14, q9, q2 \n" "vmla.f32 q15, q9, q4 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q12, q8, q7 \n" "vmla.f32 q13, q8, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q14, q8, q3 \n" "vmla.f32 q15, q8, q5 \n" "pld [%3, #64] \n" "vld1.u16 {d13}, [%3 :64] \n" // r110 "vmla.f32 q12, q9, q0 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q13, q9, q2 \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q14, q9, q4 \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! 
\n" // r20 r21 r22 r23 "vmla.f32 q15, q9, q6 \n" "vshll.u16 q11, d23, #16 \n" // k21 "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n" // r24 r25 r26 r27 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q12, q10, q0 \n" "vmla.f32 q13, q10, q2 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q6 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "vmla.f32 q13, q11, q3 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, q5 \n" "vmla.f32 q15, q11, q7 \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! \n" // r28 r29 "vmla.f32 q12, q10, q2 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q13, q10, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q14, q10, q6 \n" "vmla.f32 q15, q10, q0 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q12, q11, q3 \n" "vmla.f32 q13, q11, q5 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q14, q11, q7 \n" "vmla.f32 q15, q11, q1 \n" "pld [%4, #64] \n" "vld1.u16 {d5}, [%4 :64] \n" // r210 "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vmla.f32 q12, q8, q4 \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q13, q8, q6 \n" "vshll.u16 q2, d5, #16 \n" "vmla.f32 q14, q8, q0 \n" "pld [%5, #256] \n" "vld1.u16 {d12-d15}, [%5 :64]! \n" // r30 r31 r32 r33 "vmla.f32 q15, q8, q2 \n" "vshll.u16 q8, d20, #16 \n" // k31 "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! 
\n" // r34 r35 r36 r37 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vmla.f32 q12, q9, q4 \n" "vmla.f32 q13, q9, q6 \n" "vshll.u16 q2, d6, #16 \n" "vmla.f32 q14, q9, q0 \n" "vmla.f32 q15, q9, q2 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q12, q8, q5 \n" "vmla.f32 q13, q8, q7 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q8, q1 \n" "vmla.f32 q15, q8, q3 \n" "pld [%5, #128] \n" "vld1.u16 {d10-d11}, [%5 :64]! \n" // r38 r39 "vmla.f32 q12, q9, q6 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q13, q9, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q14, q9, q2 \n" "vmla.f32 q15, q9, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q12, q10, q7 \n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q13, q10, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q14, q10, q3 \n" "vmla.f32 q15, q10, q5 \n" "pld [%5, #64] \n" "vld1.u16 {d13}, [%5 :64] \n" // r310 "vmla.f32 q12, q11, q0 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q13, q11, q2 \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q14, q11, q4 \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43 "vmla.f32 q15, q11, q6 \n" "vshll.u16 q11, d17, #16 \n" // k41 "pld [%6, #256] \n" "vld1.u16 {d12-d15}, [%6 :64]! \n" // r44 r45 r46 r47 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vmla.f32 q12, q10, q0 \n" "vmla.f32 q13, q10, q2 \n" "vshll.u16 q6, d14, #16 \n" "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q6 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q12, q11, q1 \n" "vmla.f32 q13, q11, q3 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q14, q11, q5 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vmla.f32 q15, q11, q7 \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! 
\n" // r48 r49 "vmla.f32 q12, q8, q2 \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q13, q8, q4 \n" "vshll.u16 q0, d2, #16 \n" "vmla.f32 q14, q8, q6 \n" "vmla.f32 q15, q8, q0 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q9, q5 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q14, q9, q7 \n" "vmla.f32 q15, q9, q1 \n" "pld [%6, #64] \n" "vld1.u16 {d5}, [%6 :64] \n" // r410 "vmla.f32 q12, q8, q4 \n" "vmla.f32 q13, q8, q6 \n" "vshll.u16 q2, d5, #16 \n" "vmla.f32 q14, q8, q0 \n" "vmla.f32 q15, q8, q2 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "sub %2, %2, #16 \n" "sub %3, %3, #16 \n" "sub %4, %4, #16 \n" "sub %5, %5, #16 \n" "sub %6, %6, #16 \n" "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d26, q14, #16 \n" "vshrn.u32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0 :64]! \n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%1] \n" // r04 r05 r06 "shll v14.4s, %12.4h, #16 \n" "shll2 v15.4s, %12.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "mov v30.16b, %25.16b \n" // sum00 "mov v31.16b, %25.16b \n" // sum01 "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmul v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" 
"fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13 "shll v22.4s, v22.4h, #16 \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v22.4s \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%2] \n" // r14 r15 r16 "shll v14.4s, %15.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r20 r21 r22 r23 "shll v22.4s, v22.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%3] \n" // r24 r25 r26 "shll2 v15.4s, %17.8h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33 "shll v22.4s, v22.4h, #16 \n" 
"fmla v31.4s, v15.4s, v21.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v29.4s, v14.4s, v22.4s \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%4] \n" // r34 r35 r36 "shll v14.4s, %20.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v30.4s, v15.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v14.4s, v19.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v30.4s, v15.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43 "shll v22.4s, v22.4h, #16 \n" "fmla v29.4s, v14.4s, v21.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v30.4s, v15.4s, v20.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v31.4s, v15.4s, v22.4s \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%5] \n" // r44 r45 r46 "shll2 v15.4s, %22.8h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v29.4s, v14.4s, v18.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v30.4s, v15.4s, v17.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v29.4s, v14.4s, v20.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v30.4s, v15.4s, v19.4s \n" "fmla v31.4s, v15.4s, v21.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "fmla v29.4s, v14.4s, v22.4s \n" "fadd v30.4s, v30.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v30.4h, v31.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 
"=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), // %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03 "vshll.u16 q8, d20, #16 \n" // k00 "pld [%2, #256] \n" "vld1.u16 {d10-d12}, [%2 :64] \n" // r04 r05 r06 "vmov q13, q12 \n" // sum0 sum1 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q9, d21, #16 \n" // k01 "vmul.f32 q14, q8, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmul.f32 q15, q8, q2 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmla.f32 q12, q9, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vmla.f32 q14, q10, q2 \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmla.f32 q15, q10, q4 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q13, q11, q5 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q14, q10, q4 \n" "vmla.f32 q15, q10, q6 \n" "pld [%3, #256] \n" "vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13 "vshll.u16 q8, d18, #16 \n" // k11 "pld [%3, #256] \n" "vld1.u16 {d10-d12}, [%3 :64] \n" // r14 r15 r16 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q11, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, q2 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! 
\n" "vmla.f32 q14, q8, q1 \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q15, q8, q3 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q9, q4 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q14, q8, q3 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q15, q8, q5 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q12, q9, q4 \n" "vmla.f32 q13, q9, q6 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "pld [%4, #256] \n" "vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23 "vshll.u16 q11, d23, #16 \n" // k21 "pld [%4, #256] \n" "vld1.u16 {d10-d12}, [%4 :64] \n" // r24 r25 r26 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, q0 \n" "vmla.f32 q15, q10, q2 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q11, q3 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q14, q10, q2 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q15, q10, q4 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q13, q11, q5 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vmla.f32 q14, q8, q4 \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q15, q8, q6 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33 "vshll.u16 q8, d20, #16 \n" // k31 "pld [%5, #256] \n" "vld1.u16 {d10-d12}, [%5 :64] \n" // r34 r35 r36 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q12, q9, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q13, q9, q2 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q14, q8, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q15, q8, q3 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q12, q9, q2 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q13, q9, q4 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "vmla.f32 q14, q10, q3 \n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q15, q10, q5 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q4 \n" "vmla.f32 q13, q11, q6 \n" "pld [%6, #256] \n" "vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43 "vshll.u16 q11, d17, #16 \n" // k41 "pld [%6, #256] \n" "vld1.u16 {d10-d12}, [%6 :64] \n" // r44 r45 r46 "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q14, q10, q0 \n" "vshll.u16 q4, d10, #16 \n" "vmla.f32 q15, q10, q2 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q12, q11, q1 \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q13, q11, q3 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vmla.f32 q14, q8, q2 \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q15, q8, q4 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vshll.u16 q6, d12, #16 \n" "vmla.f32 q13, q9, q5 \n" "vmla.f32 q14, q8, q4 \n" "vmla.f32 q15, q8, q6 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vst1.u16 {d24-d25}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v16.4h, v17.4h}, [%1], #16 \n" // r00 r01 "prfm pldl1keep, [%1, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%1] \n" // r02 r03 r04 "shll v14.4s, %12.4h, #16 \n" "mov v31.16b, %25.16b \n" // sum00 "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "shll2 v15.4s, %12.8h, #16 \n" "fmul v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %13.4h, #16 \n" "fmul v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %13.8h, #16 \n" "fmul v30.4s, v14.4s, v18.4s \n" "shll v14.4s, %14.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %14.8h, #16 \n" "fmla v28.4s, v14.4s, v20.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r10 r11 "prfm pldl1keep, [%2, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%2] \n" // r12 r13 r14 "shll v14.4s, %15.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v29.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %15.8h, #16 \n" "fmla v30.4s, v14.4s, v17.4s \n" "shll v14.4s, %16.4h, #16 \n" "fmla v31.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %16.8h, #16 \n" "fmla v28.4s, v14.4s, v19.4s \n" "shll v14.4s, %17.4h, #16 \n" "fmla v29.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r20 r21 "prfm pldl1keep, [%3, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%3] \n" // r22 r23 r24 "shll2 v15.4s, %17.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" 
"shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v30.4s, v14.4s, v16.4s \n" "shll v14.4s, %18.4h, #16 \n" "fmla v31.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %18.8h, #16 \n" "fmla v28.4s, v14.4s, v18.4s \n" "shll v14.4s, %19.4h, #16 \n" "fmla v29.4s, v15.4s, v19.4s \n" "shll2 v15.4s, %19.8h, #16 \n" "fmla v30.4s, v14.4s, v20.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r30 r31 "prfm pldl1keep, [%4, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%4] \n" // r32 r33 r34 "shll v14.4s, %20.4h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v31.4s, v15.4s, v16.4s \n" "shll2 v15.4s, %20.8h, #16 \n" "fmla v28.4s, v14.4s, v17.4s \n" "shll v14.4s, %21.4h, #16 \n" "fmla v29.4s, v15.4s, v18.4s \n" "shll2 v15.4s, %21.8h, #16 \n" "fmla v30.4s, v14.4s, v19.4s \n" "shll v14.4s, %22.4h, #16 \n" "fmla v31.4s, v15.4s, v20.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r40 r41 "prfm pldl1keep, [%5, #192] \n" "ld1 {v18.4h, v19.4h, v20.4h}, [%5] \n" // r42 r43 r44 "shll2 v15.4s, %22.8h, #16 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "shll v20.4s, v20.4h, #16 \n" "fmla v28.4s, v14.4s, v16.4s \n" "shll v14.4s, %23.4h, #16 \n" "fmla v29.4s, v15.4s, v17.4s \n" "shll2 v15.4s, %23.8h, #16 \n" "fmla v30.4s, v14.4s, v18.4s \n" "shll v14.4s, %24.4h, #16 \n" "fmla v31.4s, v15.4s, v19.4s \n" "fmla v28.4s, v14.4s, v20.4s \n" "fadd v29.4s, v29.4s, v30.4s \n" "fadd v31.4s, v31.4s, v28.4s \n" "fadd v31.4s, v31.4s, v29.4s \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v31.4h}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k00_01), // %12 "w"(_k02_03), 
// %13 "w"(_k04_10), // %14 "w"(_k11_12), // %15 "w"(_k13_14), // %16 "w"(_k20_21), // %17 "w"(_k22_23), // %18 "w"(_k24_30), // %19 "w"(_k31_32), // %20 "w"(_k33_34), // %21 "w"(_k40_41), // %22 "w"(_k42_43), // %23 "w"(_k44), // %24 "w"(_bias0) // %25 : "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else // __aarch64__ asm volatile( "pld [%2, #128] \n" "vld1.u16 {d2-d3}, [%2 :64]! \n" // r00 r01 "pld [%2, #192] \n" "vld1.u16 {d6-d8}, [%2 :64] \n" // r02 r03 r04 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q8, d20, #16 \n" // k00 "pld [%1, #128] \n" "vld1.f32 {d24-d25}, [%1] \n" // sum0 "vshll.u16 q9, d21, #16 \n" // k01 "vmul.f32 q13, q8, q0 \n" "vshll.u16 q10, d22, #16 \n" // k02 "vmul.f32 q14, q9, q1 \n" "pld [%3, #128] \n" "vld1.u16 {d14-d15}, [%3 :64]! \n" // r10 r11 "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k03 "vmul.f32 q15, q10, q2 \n" "vshll.u16 q10, d16, #16 \n" // k04 "vmla.f32 q12, q11, q3 \n" "vshll.u16 q11, d17, #16 \n" // k10 "vmla.f32 q13, q10, q4 \n" "pld [%3, #192] \n" "vld1.u16 {d8-d10}, [%3 :64] \n" // r12 r13 r14 "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vshll.u16 q8, d18, #16 \n" // k11 "vmla.f32 q14, q11, q6 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k12 "vmla.f32 q15, q8, q7 \n" "pld [%4, #128] \n" "vld1.u16 {d2-d3}, [%4 :64]! 
\n" // r20 r21 "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q5, d10, #16 \n" "vshll.u16 q8, d20, #16 \n" // k13 "vmla.f32 q12, q9, q3 \n" "vshll.u16 q9, d21, #16 \n" // k14 "vmla.f32 q13, q8, q4 \n" "vshll.u16 q10, d22, #16 \n" // k20 "vmla.f32 q14, q9, q5 \n" "pld [%4, #192] \n" "vld1.u16 {d6-d8}, [%4 :64] \n" // r22 r23 r24 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! \n" "vshll.u16 q11, d23, #16 \n" // k21 "vmla.f32 q15, q10, q0 \n" "vshll.u16 q10, d16, #16 \n" // k22 "vmla.f32 q12, q11, q1 \n" "pld [%5, #128] \n" "vld1.u16 {d14-d15}, [%5 :64]! \n" // r30 r31 "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "vshll.u16 q11, d17, #16 \n" // k23 "vmla.f32 q13, q10, q2 \n" "vshll.u16 q8, d18, #16 \n" // k24 "vmla.f32 q14, q11, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d20-d23}, [%7 :64]! \n" "vshll.u16 q9, d19, #16 \n" // k30 "vmla.f32 q15, q8, q4 \n" "pld [%5, #192] \n" "vld1.u16 {d8-d10}, [%5 :64] \n" // r32 r33 r34 "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vshll.u16 q8, d20, #16 \n" // k31 "vmla.f32 q12, q9, q6 \n" "vshll.u16 q9, d21, #16 \n" // k32 "vmla.f32 q13, q8, q7 \n" "pld [%6, #128] \n" "vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41 "vshll.u16 q3, d8, #16 \n" "vshll.u16 q4, d9, #16 \n" "vshll.u16 q5, d10, #16 \n" "vshll.u16 q10, d22, #16 \n" // k33 "vmla.f32 q14, q9, q3 \n" "pld [%7, #256] \n" "vld1.u16 {d16-d19}, [%7 :64]! 
\n" "vshll.u16 q11, d23, #16 \n" // k34 "vmla.f32 q15, q10, q4 \n" "vshll.u16 q10, d16, #16 \n" // k40 "vmla.f32 q12, q11, q5 \n" "pld [%6, #192] \n" "vld1.u16 {d6-d8}, [%6 :64] \n" // r42 r43 r44 "vshll.u16 q0, d2, #16 \n" "vshll.u16 q1, d3, #16 \n" "vshll.u16 q11, d17, #16 \n" // k41 "vmla.f32 q13, q10, q0 \n" "vshll.u16 q8, d18, #16 \n" // k42 "vmla.f32 q14, q11, q1 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d8, #16 \n" "pld [%7, #64] \n" "vld1.u16 {d20}, [%7 :64] \n" "vshll.u16 q9, d19, #16 \n" // k43 "vmla.f32 q15, q8, q2 \n" "vshll.u16 q8, d20, #16 \n" // k44 "vmla.f32 q12, q9, q3 \n" "vmla.f32 q13, q8, q4 \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q12, q12, q13 \n" "sub %7, %7, #192 \n" // kptr -= 24 * 4; "vadd.f32 q12, q12, q14 \n" "vshrn.u32 d24, q12, #16 \n" "vst1.u16 {d24}, [%0 :64]! \n" : "=r"(outptr0), // %0 "=r"(bias0_data_ptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(kptr) // %7 : "0"(outptr0), "1"(bias0_data_ptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } }
omp_master.c
#include <stdio.h>

/*
 * Minimal demonstration of the OpenMP `master` construct.
 *
 * Inside the parallel region, the `master` block is executed by the
 * master thread only (and, unlike `single`, carries no implied barrier),
 * so "1.Hello OpenMP" is printed exactly once while "2.Hello OpenMP"
 * is printed by every thread of the team.
 */
int main()
{
#pragma omp parallel
    {
#pragma omp master
        {
            printf("1.Hello OpenMP\n");
        }

        /* executed by all threads in the team */
        printf("2.Hello OpenMP\n");
    } /* implicit barrier */

    return 0;
}
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// conv5x5s1_neon: 5x5 convolution, stride 1, fp32.
// For every output channel p the whole output map is first filled with the
// channel bias, then the 5x5 correlation of each input channel q is
// accumulated into it.  Output rows are produced two at a time
// (outptr / outptr2) so the input rows shared by the pair are loaded once;
// the NEON paths emit 4 output pixels per loop iteration and a
// scalar/intrinsic tail handles the remaining outw % 4 pixels.
static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // one thread per output channel; channels are independent
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw; // second output row of the pair

            const float* img0 = bottom_blob.channel(q);

            // 25 weights per (p, q) pair, laid out row-major k0..k4
            const float* kernel0 = kernel + p * inch * 25 + q * 25;

            // six consecutive input rows feed a pair of output rows
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;
            const float* r4 = img0 + w * 4;
            const float* r5 = img0 + w * 5;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

#if __ARM_NEON
            // kernel rows pre-loaded into vector registers; note the 4-lane
            // vectors overlap (e.g. _k4567 starts at weight 4), lane selects
            // below pick the individual weights
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
            float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
            float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
            float32x4_t _k16171819 = vld1q_f32(kernel0 + 16);
            float32x4_t _k20212223 = vld1q_f32(kernel0 + 20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON

            int i = 0;

            // main loop: two output rows per iteration
            for (; i + 1 < outh; i += 2)
            {
#if __ARM_NEON
                int nn = outw >> 2;              // vectorized groups of 4 pixels
                int remain = outw - (nn << 2);   // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // %18..%24 carry the 25 kernel weights (see operand list);
                    // rows r1..r4 contribute to both output rows, r0 only to
                    // the first and r5 only to the second.
                    asm volatile(
                        // v11 = rx1 / rx3
                        // v12 = rx2
                        // v13 v14 = intermediate sum register
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v7.4s}, [%1] \n" // v7 = out
                        "0: \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v8.4s}, [%2] \n" // v8 = out2
                        // r1
                        "prfm pldl1keep, [%4, #256] \n"
                        "ld1 {v9.4s, v10.4s}, [%4] \n" // v9 v10 = r10 r14
                        "add %4, %4, #16 \n"
                        "ext v11.16b, v9.16b, v10.16b, #4 \n" //r11
                        "fmul v13.4s, v9.4s, %19.s[1] \n"
                        "fmla v8.4s, v9.4s, %18.s[0] \n"
                        "ext v12.16b, v9.16b, v10.16b, #8 \n" //r12
                        "fmla v7.4s, v11.4s, %19.s[2] \n"
                        "fmul v14.4s, v11.4s, %18.s[1] \n"
                        "ext v11.16b, v9.16b, v10.16b, #12 \n" //r13
                        "fmla v13.4s, v12.4s, %19.s[3] \n"
                        "fmla v8.4s, v12.4s, %18.s[2] \n"
                        "fmla v7.4s, v11.4s, %20.s[0] \n"
                        "fmla v14.4s, v11.4s, %18.s[3] \n"
                        "prfm pldl1keep, [%5, #256] \n"
                        "fmla v13.4s, v10.4s, %20.s[1] \n"
                        "fmla v8.4s, v10.4s, %19.s[0] \n"
                        // r2
                        "ld1 {v9.4s, v10.4s}, [%5] \n" // v9 v10 = r20 r24
                        "add %5, %5, #16 \n"
                        "ext v11.16b, v9.16b, v10.16b, #4 \n" //r21
                        "fmla v7.4s, v9.4s, %20.s[2] \n"
                        "fmla v14.4s, v9.4s, %19.s[1] \n"
                        "ext v12.16b, v9.16b, v10.16b, #8 \n" //r22
                        "fmla v13.4s, v11.4s, %20.s[3] \n"
                        "fmla v8.4s, v11.4s, %19.s[2] \n"
                        "ext v11.16b, v9.16b, v10.16b, #12 \n" //r23
                        "fmla v7.4s, v12.4s, %21.s[0] \n"
                        "fmla v14.4s, v12.4s, %19.s[3] \n"
                        "fmla v13.4s, v11.4s, %21.s[1] \n"
                        "fmla v8.4s, v11.4s, %20.s[0] \n"
                        "prfm pldl1keep, [%6, #256] \n"
                        "fmla v7.4s, v10.4s, %21.s[2] \n"
                        "fmla v14.4s, v10.4s, %20.s[1] \n"
                        // r3
                        "ld1 {v9.4s, v10.4s}, [%6] \n" // v9 v10 = r30 r34
                        "add %6, %6, #16 \n"
                        "ext v11.16b, v9.16b, v10.16b, #4 \n" //r31
                        "fmla v13.4s, v9.4s, %21.s[3] \n"
                        "fmla v8.4s, v9.4s, %20.s[2] \n"
                        "ext v12.16b, v9.16b, v10.16b, #8 \n" //r32
                        "fmla v7.4s, v11.4s, %22.s[0] \n"
                        "fmla v14.4s, v11.4s, %20.s[3] \n"
                        "ext v11.16b, v9.16b, v10.16b, #12 \n" //r33
                        "fmla v13.4s, v12.4s, %22.s[1] \n"
                        "fmla v8.4s, v12.4s, %21.s[0] \n"
                        "fmla v7.4s, v11.4s, %22.s[2] \n"
                        "fmla v14.4s, v11.4s, %21.s[1] \n"
                        "prfm pldl1keep, [%7, #256] \n"
                        "fmla v13.4s, v10.4s, %22.s[3] \n"
                        "fmla v8.4s, v10.4s, %21.s[2] \n"
                        // r4
                        "ld1 {v9.4s, v10.4s}, [%7] \n" // v9 v10 = r40 r44
                        "add %7, %7, #16 \n"
                        "ext v11.16b, v9.16b, v10.16b, #4 \n" //r41
                        "fmla v7.4s, v9.4s, %23.s[0] \n"
                        "fmla v14.4s, v9.4s, %21.s[3] \n"
                        "ext v12.16b, v9.16b, v10.16b, #8 \n" //r42
                        "fmla v13.4s, v11.4s, %23.s[1] \n"
                        "fmla v8.4s, v11.4s, %22.s[0] \n"
                        "ext v11.16b, v9.16b, v10.16b, #12 \n" //r43
                        "fmla v7.4s, v12.4s, %23.s[2] \n"
                        "fmla v14.4s, v12.4s, %22.s[1] \n"
                        "fmla v13.4s, v11.4s, %23.s[3] \n"
                        "fmla v8.4s, v11.4s, %22.s[2] \n"
                        "prfm pldl1keep, [%3, #256] \n"
                        "fmla v7.4s, v10.4s, %24.s[0] \n"
                        "fmla v14.4s, v10.4s, %22.s[3] \n"
                        // r0 and r5
                        "ld1 {v9.4s, v10.4s}, [%3] \n" // v9 v10 = r00 r04
                        "add %3, %3, #16 \n"
                        "ext v11.16b, v9.16b, v10.16b, #4 \n" //r01
                        "fmla v13.4s, v11.4s, %18.s[1] \n"
                        "ext v12.16b, v9.16b, v10.16b, #8 \n" //r02
                        "fmla v7.4s, v12.4s, %18.s[2] \n"
                        "ext v11.16b, v9.16b, v10.16b, #12 \n" //r03
                        "prfm pldl1keep, [%8, #256] \n"
                        "fmla v13.4s, v11.4s, %18.s[3] \n"
                        // r5
                        "ld1 {v11.4s, v12.4s}, [%8] \n" // v11 v12 = r50 r54
                        "add %8, %8, #16 \n"
                        "fmla v8.4s, v11.4s, %23.s[0] \n"
                        "fmla v14.4s, v12.4s, %24.s[0] \n"
                        "fmla v7.4s, v9.4s, %18.s[0] \n"
                        "fmla v13.4s, v10.4s, %19.s[0] \n"
                        "ext v9.16b, v11.16b, v12.16b, #4 \n" //r51
                        "ext v10.16b, v11.16b, v12.16b, #8 \n" //r52
                        "fmla v14.4s, v9.4s, %23.s[1] \n"
                        "ext v9.16b, v11.16b, v12.16b, #12 \n" //r53
                        "fmla v8.4s, v10.4s, %23.s[2] \n"
                        "fmla v14.4s, v9.4s, %23.s[3] \n"
                        "fadd v7.4s, v7.4s, v13.4s \n"
                        "st1 {v7.4s}, [%1], #16 \n"
                        "fadd v8.4s, v8.4s, v14.4s \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v7.4s}, [%1] \n" // v7 = out
                        "st1 {v8.4s}, [%2], #16 \n"
                        "subs %w0, %w0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn),      // %0
                        "=r"(outptr),  // %1
                        "=r"(outptr2), // %2
                        "=r"(r0),      // %3
                        "=r"(r1),      // %4
                        "=r"(r2),      // %5
                        "=r"(r3),      // %6
                        "=r"(r4),      // %7
                        "=r"(r5)       // %8
                        : "0"(nn),
                        "1"(outptr),
                        "2"(outptr2),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "6"(r3),
                        "7"(r4),
                        "8"(r5),
                        "w"(_k0123),     // %18
                        "w"(_k4567),     // %19
                        "w"(_k891011),   // %20
                        "w"(_k12131415), // %21
                        "w"(_k16171819), // %22
                        "w"(_k20212223), // %23
                        "w"(_k24242424)  // %24
                        : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    // armv7 variant of the same two-row kernel; %eNN/%fNN select
                    // the low/high d-register half of the kernel q-registers
                    asm volatile(
                        // "veor q13, q13 \n"
                        // "veor q14, q14 \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d14-d15}, [%1] \n" // q7 = out
                        "0: \n"
                        // q11 = rx1 / rx3
                        // q12 = rx2
                        // q13 q14 = intermediate sum register
                        "pld [%2, #128] \n"
                        "vld1.f32 {d16-d17}, [%2] \n" // q8 = out2
                        "pld [%4, #256] \n"
                        // r1
                        "vld1.f32 {d18-d21}, [%4] \n" // q9 q10 = r10 r14
                        "add %4, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n" // r11
                        "vmul.f32 q13, q9, %e19[1] \n"
                        "vmla.f32 q8, q9, %e18[0] \n"
                        "vext.32 q12, q9, q10, #2 \n" // r12
                        "vmla.f32 q7, q11, %f19[0] \n"
                        "vmul.f32 q14, q11, %e18[1] \n"
                        "vext.32 q11, q9, q10, #3 \n" // r13
                        "vmla.f32 q13, q12, %f19[1] \n"
                        "vmla.f32 q8, q12, %f18[0] \n"
                        "vmla.f32 q7, q11, %e20[0] \n"
                        "vmla.f32 q14, q11, %f18[1] \n"
                        "pld [%5, #256] \n"
                        "vmla.f32 q13, q10, %e20[1] \n"
                        "vmla.f32 q8, q10, %e19[0] \n"
                        // r2
                        "vld1.f32 {d18-d21}, [%5] \n" // q9 q10 = r20 r24
                        "add %5, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n" // r21
                        "vmla.f32 q7, q9, %f20[0] \n"
                        "vmla.f32 q14, q9, %e19[1] \n"
                        "vext.32 q12, q9, q10, #2 \n" // r22
                        "vmla.f32 q13, q11, %f20[1] \n"
                        "vmla.f32 q8, q11, %f19[0] \n"
                        "vext.32 q11, q9, q10, #3 \n" // r23
                        "vmla.f32 q7, q12, %e21[0] \n"
                        "vmla.f32 q14, q12, %f19[1] \n"
                        "vmla.f32 q13, q11, %e21[1] \n"
                        "vmla.f32 q8, q11, %e20[0] \n"
                        "pld [%6, #256] \n"
                        "vmla.f32 q7, q10, %f21[0] \n"
                        "vmla.f32 q14, q10, %e20[1] \n"
                        // r3
                        "vld1.f32 {d18-d21}, [%6] \n" // q9 q10 = r30 r34
                        "add %6, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n" // r31
                        "vmla.f32 q13, q9, %f21[1] \n"
                        "vmla.f32 q8, q9, %f20[0] \n"
                        "vext.32 q12, q9, q10, #2 \n" // r32
                        "vmla.f32 q7, q11, %e22[0] \n"
                        "vmla.f32 q14, q11, %f20[1] \n"
                        "vext.32 q11, q9, q10, #3 \n" // r33
                        "vmla.f32 q13, q12, %e22[1] \n"
                        "vmla.f32 q8, q12, %e21[0] \n"
                        "vmla.f32 q7, q11, %f22[0] \n"
                        "vmla.f32 q14, q11, %e21[1] \n"
                        "pld [%7, #256] \n"
                        "vmla.f32 q13, q10, %f22[1] \n"
                        "vmla.f32 q8, q10, %f21[0] \n"
                        // r4
                        "vld1.f32 {d18-d21}, [%7] \n" // q9 q10 = r40 r44
                        "add %7, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n" // r41
                        "vmla.f32 q7, q9, %e23[0] \n"
                        "vmla.f32 q14, q9, %f21[1] \n"
                        "vext.32 q12, q9, q10, #2 \n" // r42
                        "vmla.f32 q13, q11, %e23[1] \n"
                        "vmla.f32 q8, q11, %e22[0] \n"
                        "vext.32 q11, q9, q10, #3 \n" // r43
                        "vmla.f32 q7, q12, %f23[0] \n"
                        "vmla.f32 q14, q12, %e22[1] \n"
                        "vmla.f32 q13, q11, %f23[1] \n"
                        "vmla.f32 q8, q11, %f22[0] \n"
                        "pld [%3, #256] \n"
                        "vmla.f32 q7, q10, %e24[0] \n"
                        "vmla.f32 q14, q10, %f22[1] \n"
                        // r0 and r5
                        "vld1.f32 {d18-d21}, [%3] \n" // q9 q10 = r00 r04
                        "add %3, #16 \n"
                        "vext.32 q11, q9, q10, #1 \n" // r01
                        "vmla.f32 q13, q11, %e18[1] \n"
                        "vext.32 q12, q9, q10, #2 \n" // r02
                        "vmla.f32 q7, q12, %f18[0] \n"
                        "vext.32 q11, q9, q10, #3 \n" // r03
                        "pld [%8, #256] \n"
                        "vmla.f32 q13, q11, %f18[1] \n"
                        // r5
                        "vld1.f32 {d22-d25}, [%8] \n" // q11 q12 = r50 r54
                        "add %8, #16 \n"
                        "vmla.f32 q8, q11, %e23[0] \n"
                        "vmla.f32 q14, q12, %e24[0] \n"
                        "vmla.f32 q7, q9, %e18[0] \n"
                        "vmla.f32 q13, q10, %e19[0] \n"
                        "vext.32 q9, q11, q12, #1 \n" // r51
                        "vext.32 q10, q11, q12, #2 \n" // r52
                        "vmla.f32 q14, q9, %e23[1] \n"
                        "vext.32 q9, q11, q12, #3 \n" // r53
                        "vmla.f32 q8, q10, %f23[0] \n"
                        "vmla.f32 q14, q9, %f23[1] \n"
                        "vadd.f32 q7, q7, q13 \n"
                        // "veor q13, q13 \n"
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "vadd.f32 q8, q8, q14 \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d14-d15}, [%1] \n" // q7 = out
                        // "veor q14, q14 \n"
                        "vst1.f32 {d16-d17}, [%2]! \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn),      // %0
                        "=r"(outptr),  // %1
                        "=r"(outptr2), // %2
                        "=r"(r0),      // %3
                        "=r"(r1),      // %4
                        "=r"(r2),      // %5
                        "=r"(r3),      // %6
                        "=r"(r4),      // %7
                        "=r"(r5)       // %8
                        : "0"(nn),
                        "1"(outptr),
                        "2"(outptr2),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "6"(r3),
                        "7"(r4),
                        "8"(r5),
                        "w"(_k0123),     // %18
                        "w"(_k4567),     // %19
                        "w"(_k891011),   // %20
                        "w"(_k12131415), // %21
                        "w"(_k16171819), // %22
                        "w"(_k20212223), // %23
                        "w"(_k24242424)  // %24
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar / intrinsic tail: one output pixel (per row pair) at a time
                for (; remain > 0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;
#if __ARM_NEON
                    // first 4 columns of each kernel row via vector MACs,
                    // 5th column handled separately via _k_t4 / _r_t4 lanes
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _k1 = vld1q_f32(k1);
                    float32x4_t _sum = vmulq_f32(_r1, _k1);
                    float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
                    float32x4_t _r2 = vld1q_f32(r2);
                    float32x4_t _k2 = vld1q_f32(k2);
                    _sum = vmlaq_f32(_sum, _r2, _k2);
                    _sum2 = vmlaq_f32(_sum2, _r2, _k1);
                    float32x4_t _r3 = vld1q_f32(r3);
                    float32x4_t _k3 = vld1q_f32(k3);
                    _sum = vmlaq_f32(_sum, _r3, _k3);
                    _sum2 = vmlaq_f32(_sum2, _r3, _k2);
                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);
                    _sum2 = vmlaq_f32(_sum2, _r4, _k3);
                    float32x4_t _r0 = vld1q_f32(r0);
                    _sum = vmlaq_f32(_sum, _r0, _k0123);
                    float32x4_t _r5 = vld1q_f32(r5);
                    _sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
                    // _k_t4 = column 4 of kernel rows 0..3
                    float32x4_t _k_t4 = {};
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
                    // _r_t4 = column 4 of input rows 0..3 (rows 1..4 for sum2)
                    float32x4_t _r_t4 = {};
                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                    sum = r4[4] * k4[4];
                    // rotate lanes down by one and append r4[4] -> rows 1..4
                    _r_t4 = vextq_f32(_r_t4, _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
                    _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
                    sum2 = r5[4] * k4[4];
                    // horizontal reduction of both sums in one pass
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
                    float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
                    sum += vget_lane_f32(_ss_ss2, 0);
                    sum2 += vget_lane_f32(_ss_ss2, 1);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
                    // second output row: same weights, input rows shifted down by one
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r1[3] * k0[3];
                    sum2 += r1[4] * k0[4];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r2[3] * k1[3];
                    sum2 += r2[4] * k1[4];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    sum2 += r3[3] * k2[3];
                    sum2 += r3[4] * k2[4];
                    sum2 += r4[0] * k3[0];
                    sum2 += r4[1] * k3[1];
                    sum2 += r4[2] * k3[2];
                    sum2 += r4[3] * k3[3];
                    sum2 += r4[4] * k3[4];
                    sum2 += r5[0] * k4[0];
                    sum2 += r5[1] * k4[1];
                    sum2 += r5[2] * k4[2];
                    sum2 += r5[3] * k4[3];
                    sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    outptr++;
                    outptr2++;
                }

                // NOTE(review): `+ 4` skips the right kernel border (assumes
                // outw == w - 4, i.e. valid 5x5 stride-1); `+ w` skips one more
                // row because two output rows were produced — confirm layout.
                r0 += 4 + w;
                r1 += 4 + w;
                r2 += 4 + w;
                r3 += 4 + w;
                r4 += 4 + w;
                r5 += 4 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // leftover single output row when outh is odd
            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // single-row kernel: 4 output pixels per iteration,
                    // v13/v14/v15 are partial-sum accumulators folded at the end
                    asm volatile(
                        "prfm pldl1keep, [%1, #128] \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v8.4s, v9.4s}, [%2] \n" // _r00 = vld1q_f32(r0+j);
                        "add %2, %2, #16 \n"
                        "0: \n"
                        "ld1 {v7.4s}, [%1] \n" // _sum = vld1q_f32(outptr+j);
                        "ext v10.16b, v8.16b, v9.16b, #4 \n" //_r01
                        "ext v11.16b, v8.16b, v9.16b, #8 \n" //_r02
                        "ext v12.16b, v8.16b, v9.16b, #12 \n" //_r03
                        "fmla v7.4s, v8.4s, %14.s[0] \n"
                        "fmul v13.4s, v10.4s, %14.s[1] \n"
                        "prfm pldl1keep, [%3, #256] \n"
                        "fmul v14.4s, v11.4s, %14.s[2] \n"
                        "fmul v15.4s, v12.4s, %14.s[3] \n"
                        "fmla v7.4s, v9.4s, %15.s[0] \n"
                        "ld1 {v8.4s, v9.4s}, [%3] \n"
                        "add %3, %3, #16 \n"
                        "ext v10.16b, v8.16b, v9.16b, #4 \n" //_r11
                        "ext v11.16b, v8.16b, v9.16b, #8 \n" //_r12
                        "ext v12.16b, v8.16b, v9.16b, #12 \n" //_r13
                        "fmla v7.4s, v8.4s, %15.s[1] \n"
                        "fmla v13.4s, v10.4s, %15.s[2] \n"
                        "prfm pldl1keep, [%4, #256] \n"
                        "fmla v14.4s, v11.4s, %15.s[3] \n"
                        "fmla v15.4s, v12.4s, %16.s[0] \n"
                        "fmla v7.4s, v9.4s, %16.s[1] \n"
                        "ld1 {v8.4s, v9.4s}, [%4] \n"
                        "add %4, %4, #16 \n"
                        "ext v10.16b, v8.16b, v9.16b, #4 \n" //_r21
                        "ext v11.16b, v8.16b, v9.16b, #8 \n" //_r22
                        "ext v12.16b, v8.16b, v9.16b, #12 \n" //_r23
                        "fmla v7.4s, v8.4s, %16.s[2] \n"
                        "fmla v13.4s, v10.4s, %16.s[3] \n"
                        "prfm pldl1keep, [%5, #256] \n"
                        "fmla v14.4s, v11.4s, %17.s[0] \n"
                        "fmla v15.4s, v12.4s, %17.s[1] \n"
                        "fmla v7.4s, v9.4s, %17.s[2] \n"
                        "ld1 {v8.4s, v9.4s}, [%5] \n"
                        "add %5, %5, #16 \n"
                        "ext v10.16b, v8.16b, v9.16b, #4 \n" //_r31
                        "ext v11.16b, v8.16b, v9.16b, #8 \n" //_r32
                        "ext v12.16b, v8.16b, v9.16b, #12 \n" //_r33
                        "fmla v7.4s, v8.4s, %17.s[3] \n"
                        "fmla v13.4s, v10.4s, %18.s[0] \n"
                        "prfm pldl1keep, [%6, #256] \n"
                        "fmla v14.4s, v11.4s, %18.s[1] \n"
                        "fmla v15.4s, v12.4s, %18.s[2] \n"
                        "fmla v7.4s, v9.4s, %18.s[3] \n"
                        "ld1 {v8.4s, v9.4s}, [%6] \n"
                        "add %6, %6, #16 \n"
                        "ext v10.16b, v8.16b, v9.16b, #4 \n" //_r41
                        "ext v11.16b, v8.16b, v9.16b, #8 \n" //_r42
                        "ext v12.16b, v8.16b, v9.16b, #12 \n" //_r43
                        "fmla v7.4s, v8.4s, %19.s[0] \n"
                        "fmla v13.4s, v10.4s, %19.s[1] \n"
                        "fmla v14.4s, v11.4s, %19.s[2] \n"
                        "fmla v15.4s, v12.4s, %19.s[3] \n"
                        "fmla v7.4s, v9.4s, %20.s[0] \n"
                        "fadd v14.4s, v14.4s, v15.4s \n"
                        "fadd v7.4s, v7.4s, v13.4s \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "fadd v7.4s, v7.4s, v14.4s \n"
                        "ld1 {v8.4s, v9.4s}, [%2] \n"
                        "add %2, %2, #16 \n"
                        "st1 {v7.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "subs %w0, %w0, #1 \n"
                        "bne 0b \n"
                        "sub %2, %2, #16 \n"
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2),     // %4
                        "=r"(r3),     // %5
                        "=r"(r4)      // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "w"(_k0123),     // %14
                        "w"(_k4567),     // %15
                        "w"(_k891011),   // %16
                        "w"(_k12131415), // %17
                        "w"(_k16171819), // %18
                        "w"(_k20212223), // %19
                        "w"(_k24242424)  // %20
                        : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        // "veor q15, q15 \n"// _sum3 = 0;
                        "pld [%1, #128] \n"
                        "pld [%2, #256] \n"
                        "vld1.f32 {d16-d19}, [%2] \n" // _r00 = vld1q_f32(r0+j);
                        "add %2, #16 \n"
                        "0: \n"
                        "vld1.f32 {d14-d15}, [%1] \n" // _sum = vld1q_f32(outptr+j);
                        // "veor q13, q13 \n"// _sum2 = 0;
                        // "veor q14, q14 \n"// _sum3 = 0;
                        "vext.32 q10, q8, q9, #1 \n" // _r01
                        "vext.32 q11, q8, q9, #2 \n" // _r02
                        "vext.32 q12, q8, q9, #3 \n" // _r03
                        "vmla.f32 q7, q8, %e14[0] \n"
                        "vmul.f32 q13, q10, %e14[1] \n"
                        "pld [%3, #256] \n"
                        "vmul.f32 q14, q11, %f14[0] \n"
                        "vmul.f32 q15, q12, %f14[1] \n"
                        "vmla.f32 q7, q9, %e15[0] \n"
                        "vld1.f32 {d16-d19}, [%3] \n"
                        "add %3, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vext.32 q12, q8, q9, #3 \n"
                        "vmla.f32 q7, q8, %e15[1] \n"
                        "vmla.f32 q13, q10, %f15[0] \n"
                        "pld [%4, #256] \n"
                        "vmla.f32 q14, q11, %f15[1] \n"
                        "vmla.f32 q15, q12, %e16[0] \n"
                        "vmla.f32 q7, q9, %e16[1] \n"
                        "vld1.f32 {d16-d19}, [%4] \n"
                        "add %4, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vext.32 q12, q8, q9, #3 \n"
                        "vmla.f32 q7, q8, %f16[0] \n"
                        "vmla.f32 q13, q10, %f16[1] \n"
                        "pld [%5, #256] \n"
                        "vmla.f32 q14, q11, %e17[0] \n"
                        "vmla.f32 q15, q12, %e17[1] \n"
                        "vmla.f32 q7, q9, %f17[0] \n"
                        "vld1.f32 {d16-d19}, [%5] \n"
                        "add %5, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vext.32 q12, q8, q9, #3 \n"
                        "vmla.f32 q7, q8, %f17[1] \n"
                        "vmla.f32 q13, q10, %e18[0] \n"
                        "pld [%6, #256] \n"
                        "vmla.f32 q14, q11, %e18[1] \n"
                        "vmla.f32 q15, q12, %f18[0] \n"
                        "vmla.f32 q7, q9, %f18[1] \n"
                        "vld1.f32 {d16-d19}, [%6] \n"
                        "add %6, #16 \n"
                        "vext.32 q10, q8, q9, #1 \n"
                        "vext.32 q11, q8, q9, #2 \n"
                        "vext.32 q12, q8, q9, #3 \n"
                        "vmla.f32 q7, q8, %e19[0] \n"
                        "vmla.f32 q13, q10, %e19[1] \n"
                        "vmla.f32 q14, q11, %f19[0] \n"
                        "vmla.f32 q15, q12, %f19[1] \n"
                        "vmla.f32 q7, q9, %e20[0] \n"
                        "vadd.f32 q14, q14, q15 \n"
                        "vadd.f32 q7, q7, q13 \n"
                        // "veor q15, q15 \n"// _sum3 = 0;
                        "pld [%2, #256] \n"
                        "vadd.f32 q7, q7, q14 \n"
                        "vld1.f32 {d16-d19}, [%2] \n" // _r00 = vld1q_f32(r0+j);
                        "add %2, #16 \n"
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "pld [%1, #128] \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        "sub %2, #16 \n"
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2),     // %4
                        "=r"(r3),     // %5
                        "=r"(r4)      // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "w"(_k0123),     // %14
                        "w"(_k4567),     // %15
                        "w"(_k891011),   // %16
                        "w"(_k12131415), // %17
                        "w"(_k16171819), // %18
                        "w"(_k20212223), // %19
                        "w"(_k24242424)  // %20
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar / intrinsic tail for the last outw % 4 pixels
                for (; remain > 0; remain--)
                {
                    float sum = 0;
#if __ARM_NEON
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);
                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);
                    // column 4 of rows 0..3 gathered into one vector MAC
                    float32x4_t _k_t4 = {};
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
                    float32x4_t _r_t4 = {};
                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                    sum = r4[4] * k4[4]; // last corner handled in scalar
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);
                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r4[0]
* k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; #endif *outptr += sum; r0++; r1++; r2++; r3++; r4++; outptr++; } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } } static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q = 0; q < inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch * 25 + q * 25; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0 + 4); float32x4_t _k891011 = vld1q_f32(kernel0 + 8); float32x4_t _k12131415 = vld1q_f32(kernel0 + 12); float32x4_t _k16171819 = vld1q_f32(kernel0 + 16); float32x4_t _k20212223 = vld1q_f32(kernel0 + 20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); #endif // __ARM_NEON for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" // v8 = 0 2 4 6 q9 = 1 3 5 7 "prfm pldl1keep, [%2, #256] \n" "ld2 {v10.4s, v11.4s}, [%2] \n" // v10 = 8 10 12 14 v11 = 9 11 13 15 "prfm pldl1keep, [%1, #128] \n" "0: 
\n" "ld1 {v7.4s}, [%1] \n" // v7 = outptr "ext v12.16b, v8.16b, v10.16b, #4 \n" // v12 = 2 4 6 8 "ext v11.16b, v9.16b, v11.16b, #4 \n" // v11 = 3 5 7 9 "ext v10.16b, v8.16b, v10.16b, #8 \n" // v10 = 4 6 8 10 "fmla v7.4s, v8.4s, %14.s[0] \n" "fmul v13.4s, v9.4s, %14.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "fmul v14.4s, v12.4s, %14.s[2] \n" "fmul v15.4s, v11.4s, %14.s[3] \n" "fmla v7.4s, v10.4s, %15.s[0] \n" "ld2 {v8.4s, v9.4s}, [%3], #32 \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v10.4s, v11.4s}, [%3] \n" "ext v12.16b, v8.16b, v10.16b, #4 \n" "ext v11.16b, v9.16b, v11.16b, #4 \n" "ext v10.16b, v8.16b, v10.16b, #8 \n" "fmla v7.4s, v8.4s, %15.s[1] \n" "fmla v13.4s, v9.4s, %15.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "fmla v14.4s, v12.4s, %15.s[3] \n" "fmla v15.4s, v11.4s, %16.s[0] \n" "fmla v7.4s, v10.4s, %16.s[1] \n" "ld2 {v8.4s, v9.4s}, [%4], #32 \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v10.4s, v11.4s}, [%4] \n" "ext v12.16b, v8.16b, v10.16b, #4 \n" "ext v11.16b, v9.16b, v11.16b, #4 \n" "ext v10.16b, v8.16b, v10.16b, #8 \n" "fmla v7.4s, v8.4s, %16.s[2] \n" "fmla v13.4s, v9.4s, %16.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "fmla v14.4s, v12.4s, %17.s[0] \n" "fmla v15.4s, v11.4s, %17.s[1] \n" "fmla v7.4s, v10.4s, %17.s[2] \n" "ld2 {v8.4s, v9.4s}, [%5], #32 \n" "prfm pldl1keep, [%5, #256] \n" "ld2 {v10.4s, v11.4s}, [%5] \n" "ext v12.16b, v8.16b, v10.16b, #4 \n" "ext v11.16b, v9.16b, v11.16b, #4 \n" "ext v10.16b, v8.16b, v10.16b, #8 \n" "fmla v7.4s, v8.4s, %17.s[3] \n" "fmla v13.4s, v9.4s, %18.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "fmla v14.4s, v12.4s, %18.s[1] \n" "fmla v15.4s, v11.4s, %18.s[2] \n" "fmla v7.4s, v10.4s, %18.s[3] \n" "ld2 {v8.4s, v9.4s}, [%6], #32 \n" "prfm pldl1keep, [%6, #256] \n" "ld2 {v10.4s, v11.4s}, [%6] \n" "ext v12.16b, v8.16b, v10.16b, #4 \n" "ext v11.16b, v9.16b, v11.16b, #4 \n" "ext v10.16b, v8.16b, v10.16b, #8 \n" "fmla v7.4s, v8.4s, %19.s[0] \n" "fmla v13.4s, v9.4s, %19.s[1] \n" "fmla v14.4s, v12.4s, %19.s[2] \n" "fmla v15.4s, v11.4s, 
%19.s[3] \n" "fmla v7.4s, v10.4s, %20.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2], #32 \n" "fadd v14.4s, v14.4s, v15.4s \n" "fadd v7.4s, v7.4s, v13.4s \n" "prfm pldl1keep, [%2, #256] \n" "fadd v7.4s, v7.4s, v14.4s \n" "ld2 {v10.4s, v11.4s}, [%2] \n" "st1 {v7.4s}, [%1], #16 \n" "prfm pldl1keep, [%1, #128] \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424) // %20 : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( // "veor q15, q15 \n"// _sump3 = 0; // "veor q13, q13 \n"// _sump2 = 0; // "veor q14, q14 \n"// _sump3 = 0; "pld [%2, #256] \n" "vld2.f32 {d16-d19}, [%2]! \n" // q8 = 0 2 4 6 q9 = 1 3 5 7 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2] \n" // q10 = 8 10 12 14 q11 = 9 11 13 15 "pld [%1, #128] \n" "0: \n" "vld1.f32 {d14-d15}, [%1] \n" // q7 = outptr "vext.32 q12, q8, q10, #1 \n" // q12 = 2 4 6 8 "vext.32 q11, q9, q11, #1 \n" // q11 = 3 5 7 9 "vext.32 q10, q8, q10, #2 \n" // q10 = 4 6 8 10 "vmla.f32 q7, q8, %e14[0] \n" "vmul.f32 q13, q9, %e14[1] \n" "pld [%3, #256] \n" "vmul.f32 q14, q12, %f14[0] \n" "vmul.f32 q15, q11, %f14[1] \n" "vmla.f32 q7, q10, %e15[0] \n" "vld2.f32 {d16-d19}, [%3]! \n" "pld [%3, #256] \n" "vld2.f32 {d20-d23}, [%3] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %e15[1] \n" "vmla.f32 q13, q9, %f15[0] \n" "pld [%4, #256] \n" "vmla.f32 q14, q12, %f15[1] \n" "vmla.f32 q15, q11, %e16[0] \n" "vmla.f32 q7, q10, %e16[1] \n" "vld2.f32 {d16-d19}, [%4]! 
\n" "pld [%4, #256] \n" "vld2.f32 {d20-d23}, [%4] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %f16[0] \n" "vmla.f32 q13, q9, %f16[1] \n" "pld [%5, #256] \n" "vmla.f32 q14, q12, %e17[0] \n" "vmla.f32 q15, q11, %e17[1] \n" "vmla.f32 q7, q10, %f17[0] \n" "vld2.f32 {d16-d19}, [%5]! \n" "pld [%5, #256] \n" "vld2.f32 {d20-d23}, [%5] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %f17[1] \n" "vmla.f32 q13, q9, %e18[0] \n" "pld [%6, #256] \n" "vmla.f32 q14, q12, %e18[1] \n" "vmla.f32 q15, q11, %f18[0] \n" "vmla.f32 q7, q10, %f18[1] \n" "vld2.f32 {d16-d19}, [%6]! \n" "pld [%6, #256] \n" "vld2.f32 {d20-d23}, [%6] \n" "vext.32 q12, q8, q10, #1 \n" "vext.32 q11, q9, q11, #1 \n" "vext.32 q10, q8, q10, #2 \n" "vmla.f32 q7, q8, %e19[0] \n" "vmla.f32 q13, q9, %e19[1] \n" "vmla.f32 q14, q12, %f19[0] \n" "vmla.f32 q15, q11, %f19[1] \n" "vmla.f32 q7, q10, %e20[0] \n" "pld [%2, #256] \n" "vld2.f32 {d16-d19}, [%2]! \n" // q8 = 0 2 4 6 q9 = 1 3 5 7 "vadd.f32 q14, q14, q15 \n" "vadd.f32 q7, q7, q13 \n" // "veor q15, q15 \n"// _sump3 = 0; // "veor q13, q13 \n"// _sump2 = 0; "pld [%2, #256] \n" "vadd.f32 q7, q7, q14 \n" "vld2.f32 {d20-d23}, [%2] \n" // q10 = 8 10 12 14 q11 = 9 11 13 15 // "veor q14, q14 \n"// _sump3 = 0; "vst1.f32 {d14-d15}, [%1]! 
\n" "pld [%1, #128] \n" "subs %0, #1 \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424) // %20 : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = 0; #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); sum += r0[4] * k0[4]; sum += r1[4] * k1[4]; sum += r2[4] * k2[4]; sum += r3[4] * k3[4]; sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; #endif *outptr += sum; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } } }
common.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <chrono> #include <cmath> #include <cstdint> #include <cstdio> #include <functional> #include <iomanip> #include <iterator> #include <map> #include <memory> #include <sstream> #include <type_traits> #include <unordered_map> #include <utility> #include <vector> #include <mimalloc.h> #ifdef _MSC_VER #include <intrin.h> #pragma intrinsic(_BitScanReverse) #endif #if defined(_MSC_VER) #include <malloc.h> #elif MM_MALLOC #include <mm_malloc.h> // https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html // https://www.oreilly.com/library/view/mac-os-x/0596003560/ch05s01s02.html #elif defined(__GNUC__) && defined(HAVE_MALLOC_H) #include <malloc.h> #define _mm_malloc(a, b) memalign(b, a) #define _mm_free(a) free(a) #else #include <stdlib.h> #define _mm_malloc(a, b) malloc(a) #define _mm_free(a) free(a) #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string &str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string, mi_stl_allocator<std::string>> Split(const 
char *c_str, char delimiter) { std::vector<std::string, mi_stl_allocator<std::string>> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string, mi_stl_allocator<std::string>> SplitBrackets(const char *c_str, char left_delimiter, char right_delimiter) { std::vector<std::string, mi_stl_allocator<std::string>> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; bool open = false; while (pos < str.length()) { if (str[pos] == left_delimiter) { open = true; ++pos; i = pos; } else if (str[pos] == right_delimiter && open) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } open = false; ++pos; } else { ++pos; } } return ret; } inline static std::vector<std::string, mi_stl_allocator<std::string>> SplitLines(const char *c_str) { std::vector<std::string, mi_stl_allocator<std::string>> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string, mi_stl_allocator<std::string>> Split(const char *c_str, const char *delimiters) { std::vector<std::string, mi_stl_allocator<std::string>> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } 
template <typename T> inline static const char *Atoi(const char *p, T *out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template <typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base * base, power / 2); } else if (power % 3 == 0) { return Pow(base * base * base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char *Atof(const char *p, double *out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. 
*out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char *p, int *out) { const char *after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char *p, double *out) { const char *after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000}; #ifdef _MSC_VER // NOLINTNEXTLINE unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char *buffer) { const char kDigitsLut[200] = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', 
'1', '8', '1', '9', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'}; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = static_cast<char>(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char *buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char *buffer, size_t buffer_len) { #ifdef _MSC_VER int num_chars = sprintf_s(buffer, buffer_len, "%.17g", value); #else int num_chars = snprintf(buffer, buffer_len, "%.17g", value); #endif CHECK_GE(num_chars, 0); } inline static const char *SkipSpaceAndTab(const char *p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char *SkipReturn(const char *p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template <typename T, typename T2> inline static std::vector<T2, mi_stl_allocator<T2>> ArrayCast(const std::vector<T, 
mi_stl_allocator<T>> &arr) { std::vector<T2, mi_stl_allocator<T2>> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template <typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char *buffer, size_t) const { Int32ToStr(value, buffer); } }; template <typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char *buffer, size_t buf_len) const { #ifdef _MSC_VER int num_chars = sprintf_s(buffer, buf_len, "%g", value); #else int num_chars = snprintf(buffer, buf_len, "%g", value); #endif CHECK_GE(num_chars, 0); } }; template <typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char *buffer, size_t) const { Uint32ToStr(value, buffer); } }; template <typename T> inline static std::string ArrayToStringFast(const std::vector<T, mi_stl_allocator<T>> &arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char, mi_stl_allocator<char>> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double, mi_stl_allocator<double>> &arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char, mi_stl_allocator<char>> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template <typename T, bool is_float> struct __StringToTHelper { T 
operator()(const std::string &str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template <typename T> struct __StringToTHelper<T, true> { T operator()(const std::string &str) const { return static_cast<T>(std::stod(str)); } }; template <typename T> inline static std::vector<T, mi_stl_allocator<T>> StringToArray(const std::string &str, char delimiter) { std::vector<std::string, mi_stl_allocator<std::string>> strs = Split(str.c_str(), delimiter); std::vector<T, mi_stl_allocator<T>> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto &s : strs) { ret.push_back(helper(s)); } return ret; } template <typename T> inline static std::vector<std::vector<T, mi_stl_allocator<T>>, mi_stl_allocator<std::vector<T, mi_stl_allocator<T>>>> StringToArrayofArrays( const std::string &str, char left_bracket, char right_bracket, char delimiter) { std::vector<std::string, mi_stl_allocator<std::string>> strs = SplitBrackets(str.c_str(), left_bracket, right_bracket); std::vector<std::vector<T, mi_stl_allocator<T>>, mi_stl_allocator<std::vector<T, mi_stl_allocator<T>>>> ret; for (const auto &s : strs) { ret.push_back(StringToArray<T>(s, delimiter)); } return ret; } template <typename T> inline static std::vector<T, mi_stl_allocator<T>> StringToArray(const std::string &str, int n) { if (n == 0) { return std::vector<T, mi_stl_allocator<T>>(); } std::vector<std::string, mi_stl_allocator<std::string>> strs = Split(str.c_str(), ' '); CHECK_EQ(strs.size(), static_cast<size_t>(n)); std::vector<T, mi_stl_allocator<T>> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto &s : strs) { ret.push_back(helper(s)); } return ret; } template <typename T, bool is_float> struct __StringToTHelperFast { const char *operator()(const char *p, T *out) const { return Atoi(p, out); } }; template <typename T> struct __StringToTHelperFast<T, true> { const char *operator()(const char *p, T 
*out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template <typename T> inline static std::vector<T, mi_stl_allocator<T>> StringToArrayFast(const std::string &str, int n) { if (n == 0) { return std::vector<T, mi_stl_allocator<T>>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T, mi_stl_allocator<T>> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template <typename T> inline static std::string Join(const std::vector<T, mi_stl_allocator<T>> &strs, const char *delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template <> inline std::string Join<int8_t>(const std::vector<int8_t, mi_stl_allocator<int8_t>> &strs, const char *delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template <typename T> inline static std::string Join(const std::vector<T, mi_stl_allocator<T>> &strs, size_t start, size_t end, const char *delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; 
++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values. */ inline static void Softmax(std::vector<double, mi_stl_allocator<double>> *p_rec) { std::vector<double, mi_stl_allocator<double>> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double *input, double *output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template <typename T> std::vector<const T *, mi_stl_allocator<const T *>> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>, mi_stl_allocator<std::unique_ptr<T>>> &input) { std::vector<const T *, mi_stl_allocator<const T *>> ret; for (auto t = input.begin(); t != input.end(); ++t) { ret.push_back(t->get()); } return ret; } template <typename T1, typename T2> inline static void SortForPair(std::vector<T1, mi_stl_allocator<T1>> *keys, std::vector<T2, mi_stl_allocator<T2>> *values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>, mi_stl_allocator<std::pair<T1, T2>>> arr; auto &ref_key = *keys; auto &ref_value = *values; for (size_t i = start; i < keys->size(); ++i) { arr.emplace_back(ref_key[i], ref_value[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2> &a, const std::pair<T1, T2> &b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2> &a, const std::pair<T1, T2> &b) { return a.first > 
b.first; }); } for (size_t i = start; i < arr.size(); ++i) { ref_key[i] = arr[i].first; ref_value[i] = arr[i].second; } } template <typename T> inline static std::vector<T *, mi_stl_allocator<T *>> Vector2Ptr(std::vector<std::vector<T, mi_stl_allocator<T>>, mi_stl_allocator<std::vector<T, mi_stl_allocator<T>>>> *data) { std::vector<T *, mi_stl_allocator<T *>> ptr(data->size()); auto &ref_data = *data; for (size_t i = 0; i < data->size(); ++i) { ptr[i] = ref_data[i].data(); } return ptr; } template <typename T> inline static std::vector<int, mi_stl_allocator<int>> VectorSize(const std::vector<std::vector<T, mi_stl_allocator<T>>, mi_stl_allocator<std::vector<T, mi_stl_allocator<T>>>> &data) { std::vector<int, mi_stl_allocator<int>> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (std::isnan(x)) { return 0.0; } else if (x >= 1e300) { return 1e300; } else if (x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (std::isnan(x)) { return 0.0f; } else if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template <typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type *IteratorValType(_Iter) { return (0); } template <typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt *) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = OMP_NUM_THREADS(); if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size * i; size_t right = left + 
inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. std::vector<_VTRanIt, mi_stl_allocator<_VTRanIt>> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template <typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. 
template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t, mi_stl_allocator<uint32_t>> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t, mi_stl_allocator<uint32_t>>(size); } template <typename T> inline static void InsertBitset(std::vector<uint32_t, mi_stl_allocator<uint32_t>> *vec, const T val) { auto &ref_v = *vec; int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec->size()) < i1 + 1) { vec->resize(i1 + 1, 0); } ref_v[i1] |= (1 << i2); } template <typename T> inline static std::vector<uint32_t, mi_stl_allocator<uint32_t>> ConstructBitset(const T *vals, int n) { std::vector<uint32_t, mi_stl_allocator<uint32_t>> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template <typename T> inline static bool FindInBitset(const uint32_t *bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY); } inline static size_t GetLine(const char *str) { auto start = 
str; while (*str != '\0' && *str != '\n' && *str != '\r') { ++str; } return str - start; }

// Advances past a line break: consumes an optional '\r' then an optional '\n',
// so "\n", "\r" and "\r\n" terminators are all handled.
inline static const char *SkipNewLine(const char *str) {
  if (*str == '\r') { ++str; }
  if (*str == '\n') { ++str; }
  return str;
}

// Sign of x: returns +1, 0 or -1 via a branch-free comparison trick.
template <typename T>
static int Sign(T x) {
  return (x > T(0)) - (x < T(0));
}

// Logarithm that maps non-positive inputs to -inf instead of raising a domain error.
template <typename T>
static T SafeLog(T x) {
  if (x > 0) {
    return std::log(x);
  } else {
    return -INFINITY;
  }
}

// Returns false when s contains a character with structural meaning
// (quote, comma, colon, brackets, braces) that cannot be embedded verbatim.
inline bool CheckAllowedJSON(const std::string &s) {
  unsigned char char_code;
  for (auto c : s) {
    char_code = static_cast<unsigned char>(c);
    if (char_code == 34  // "
        || char_code == 44  // ,
        || char_code == 58  // :
        || char_code == 91  // [
        || char_code == 93  // ]
        || char_code == 123  // {
        || char_code == 125  // }
       ) {
      return false;
    }
  }
  return true;
}

// Rounds to the nearest integer by adding 0.5 and truncating.
// NOTE(review): for negative x this truncates toward zero (e.g. -1.4 -> 0,
// not -1), so it only rounds correctly for non-negative inputs -- confirm
// callers never pass negative values.
inline int RoundInt(double x) {
  return static_cast<int>(x + 0.5f);
}

// Minimal STL-compatible allocator returning N-byte aligned storage
// (default N = 32) via _mm_malloc/_mm_free.
template <typename T, std::size_t N = 32>
class AlignmentAllocator {
 public:
  typedef T value_type;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef T *pointer;
  typedef const T *const_pointer;
  typedef T &reference;
  typedef const T &const_reference;

  inline AlignmentAllocator() throw() {}

  template <typename T2>
  inline AlignmentAllocator(const AlignmentAllocator<T2, N> &) throw() {}

  inline ~AlignmentAllocator() throw() {}

  // (sic) "adress": kept misspelled -- renaming would break existing callers.
  inline pointer adress(reference r) { return &r; }

  inline const_pointer adress(const_reference r) const { return &r; }

  // Allocates raw storage for n objects, aligned to N bytes.
  inline pointer allocate(size_type n) {
    return (pointer)_mm_malloc(n * sizeof(value_type), N);
  }

  inline void deallocate(pointer p, size_type) { _mm_free(p); }

  // Placement-constructs a copy of wert at p.
  inline void construct(pointer p, const value_type &wert) {
    new (p) value_type(wert);
  }

  inline void destroy(pointer p) { p->~value_type(); }

  inline size_type max_size() const throw() {
    return size_type(-1) / sizeof(value_type);
  }

  template <typename T2>
  struct rebind {
    typedef AlignmentAllocator<T2, N> other;
  };

  bool operator!=(const AlignmentAllocator<T, N> &other) const {
    return !(*this == other);
  }

  // Returns true if and only if storage allocated from *this
  // can be deallocated from other, and vice versa.
  // Always returns true for stateless allocators.
  bool operator==(const AlignmentAllocator<T, N> &) const { return true; }
};

// Per-thread wall-clock accumulator keyed by timer name. All timing code
// compiles away unless TIMETAG is defined; aggregated totals are printed
// when the Timer is destroyed.
class Timer {
 public:
  Timer() {
#ifdef TIMETAG
    int num_threads = OMP_NUM_THREADS();
    start_time_.resize(num_threads);
    stats_.resize(num_threads);
#endif  // TIMETAG
  }

  ~Timer() { Print(); }

#ifdef TIMETAG
  // Records the current time for `name` on the calling thread.
  void Start(const std::string &name) {
    auto tid = omp_get_thread_num();
    start_time_[tid][name] = std::chrono::steady_clock::now();
  }

  // Adds the time elapsed since the matching Start(name) to this thread's total.
  void Stop(const std::string &name) {
    auto cur_time = std::chrono::steady_clock::now();
    auto tid = omp_get_thread_num();
    if (stats_[tid].find(name) == stats_[tid].end()) {
      stats_[tid][name] = std::chrono::duration<double, std::milli>(0);
    }
    stats_[tid][name] += cur_time - start_time_[tid][name];
  }
#else
  void Start(const std::string &) { }
  void Stop(const std::string &) {}
#endif  // TIMETAG

  // Merges the per-thread maps and logs each timer's total, sorted by name.
  void Print() const {
#ifdef TIMETAG
    std::unordered_map<std::string, std::chrono::duration<double, std::milli>>
        stats(stats_[0].begin(), stats_[0].end());
    for (size_t i = 1; i < stats_.size(); ++i) {
      for (auto it = stats_[i].begin(); it != stats_[i].end(); ++it) {
        if (stats.find(it->first) == stats.end()) {
          stats[it->first] = it->second;
        } else {
          stats[it->first] += it->second;
        }
      }
    }
    // std::map orders the printed output alphabetically by timer name.
    std::map<std::string, std::chrono::duration<double, std::milli>> ordered(
        stats.begin(), stats.end());
    for (auto it = ordered.begin(); it != ordered.end(); ++it) {
      Log::Info("%s costs:\t %f", it->first.c_str(), it->second * 1e-3);
    }
#endif  // TIMETAG
  }

#ifdef TIMETAG
  // Per-thread start timestamps, indexed by OpenMP thread id.
  std::vector<
      std::unordered_map<std::string, std::chrono::steady_clock::time_point>>
      start_time_;
  // Per-thread accumulated durations, indexed by OpenMP thread id.
  std::vector<std::unordered_map<std::string,
                                 std::chrono::duration<double, std::milli>>>
      stats_;
#endif  // TIMETAG
};

// Note: this class is not thread-safe, don't use it inside omp blocks
// Scope guard: starts the named timer on construction, stops it on destruction.
class FunctionTimer {
 public:
#ifdef TIMETAG
  FunctionTimer(const std::string &name, Timer &timer) : timer_(timer) {
    timer.Start(name);
    name_ = name;
  }

  ~FunctionTimer() { timer_.Stop(name_); }

 private:
  std::string name_;
  Timer &timer_;
#else
  FunctionTimer(const std::string &, Timer &) { }
#endif  // TIMETAG
};

}  // namespace Common

// Process-wide timer instance, defined in a translation unit elsewhere.
extern Common::Timer global_timer;

}  // namespace LightGBM

#endif  // LightGBM_UTILS_COMMON_FUN_H_
sparse_matrix_multiplication_utility.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED ) #define KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED // System includes #include <vector> #include <math.h> #include <algorithm> #include <numeric> #ifdef _OPENMP #include <omp.h> #endif // External includes #include "amgcl/value_type/interface.hpp" // Project includes #include "includes/define.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class SparseMatrixMultiplicationUtility * @ingroup ContactStructuralMechanicsApplication * @brief An utility to multiply sparse matrix in Ublas * @details Taken and adapted for ublas from external_libraries/amgcl/detail/spgemm.hpp by Denis Demidov <dennis.demidov@gmail.com> * @todo Remove as soon as we do not depend of Ublas anymore... 
 * @author Vicente Mataix Ferrandiz
 */
class SparseMatrixMultiplicationUtility
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of SparseMatrixMultiplicationUtility
    KRATOS_CLASS_POINTER_DEFINITION( SparseMatrixMultiplicationUtility );

    /// The size type
    typedef std::size_t SizeType;

    /// The index type
    typedef std::size_t IndexType;

    /// The signed index type
    typedef std::ptrdiff_t SignedIndexType;

    /// A vector of indexes
    typedef DenseVector<IndexType> IndexVectorType;

    /// A vector of indexes (signed)
    typedef DenseVector<SignedIndexType> SignedIndexVectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor
    SparseMatrixMultiplicationUtility(){};

    /// Destructor
    virtual ~SparseMatrixMultiplicationUtility()= default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Metafunction that returns value type of a matrix or a vector type.
    template <class T, class Enable = void>
    struct value_type {
        typedef typename T::value_type type;
    };

    /**
     * @brief Matrix-matrix product C = A·B
     * @detail This method uses a template for each matrix, and dispatches to the
     * algorithm best suited to the available OpenMP thread count
     * @param rA The first matrix
     * @param rB The second matrix
     * @param rC The resulting matrix
     */
    template <class AMatrix, class BMatrix, class CMatrix>
    static void MatrixMultiplication(
        const AMatrix& rA,
        const BMatrix& rB,
        CMatrix& rC
        )
    {
#ifdef _OPENMP
        const int nt = omp_get_max_threads();
#else
        const int nt = 1;
#endif

        // Row-merge scales better on many cores; the Saad variant wins otherwise.
        if (nt > 16) {
            MatrixMultiplicationRMerge(rA, rB, rC);
        } else {
            MatrixMultiplicationSaad(rA, rB, rC);
        }
    }

    /**
     * @brief The first is an OpenMP-enabled modification of classic algorithm from Saad
     * @details It is used whenever the number of OpenMP threads is 16 or less (see MatrixMultiplication). Saad, Yousef. Iterative methods for sparse linear systems. Siam, 2003.
 * @param A The first matrix to multiply
 * @param B The second matrix to multiply
 * @param C The resulting matrix
 */
template <class AMatrix, class BMatrix, class CMatrix>
static void MatrixMultiplicationSaad(
    const AMatrix& A,
    const BMatrix& B,
    CMatrix& C
    )
{
    typedef typename value_type<CMatrix>::type ValueType;

    // Auxiliar sizes
    const SizeType nrows = A.size1();
    const SizeType ncols = B.size2();

    // Exiting just in case of empty matrix
    if ((nrows == 0) || (ncols == 0))
        return void();

    // Get access to A, B and C data (CSR arrays: row pointers, column indices, values)
    const IndexType* index1_a = A.index1_data().begin();
    const IndexType* index2_a = A.index2_data().begin();
    const double* values_a = A.value_data().begin();
    const IndexType* index1_b = B.index1_data().begin();
    const IndexType* index2_b = B.index2_data().begin();
    const double* values_b = B.value_data().begin();

    IndexType* c_ptr = new IndexType[nrows + 1];
    c_ptr[0] = 0;

    // First (symbolic) pass: count the distinct columns in each row of C.
    #pragma omp parallel
    {
        // marker[col] remembers the last row that touched the column, so each
        // column is counted once per row. One marker array per thread.
        SignedIndexVectorType marker(ncols);
        for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill)
            marker[i_fill] = -1;

        #pragma omp for
        for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
            const IndexType row_begin_a = index1_a[ia];
            const IndexType row_end_a = index1_a[ia+1];

            IndexType C_cols = 0;
            for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
                const IndexType ca = index2_a[ja];
                const IndexType row_begin_b = index1_b[ca];
                const IndexType row_end_b = index1_b[ca+1];

                for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
                    const IndexType cb = index2_b[jb];
                    if (marker[cb] != ia) {
                        marker[cb] = ia;
                        ++C_cols;
                    }
                }
            }
            c_ptr[ia + 1] = C_cols;
        }
    }

    // We initialize the sparse matrix (prefix-sum turns per-row counts into row pointers)
    std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr);
    const SizeType nonzero_values = c_ptr[nrows];
    IndexType* aux_index2_c = new IndexType[nonzero_values];
    ValueType* aux_val_c = new ValueType[nonzero_values];

    // Second (numeric) pass: fill column indices and accumulate values.
    #pragma omp parallel
    {
        // Here marker[col] stores the position of the column inside the
        // current row of C, so repeated contributions accumulate in place.
        SignedIndexVectorType marker(ncols);
        for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill)
            marker[i_fill] = -1;

        #pragma omp for
        for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
            const IndexType row_begin_a = index1_a[ia];
            const IndexType row_end_a = index1_a[ia+1];

            const IndexType row_beg = c_ptr[ia];
            IndexType row_end = row_beg;

            for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
                const IndexType ca = index2_a[ja];
                const ValueType va = values_a[ja];

                const IndexType row_begin_b = index1_b[ca];
                const IndexType row_end_b = index1_b[ca+1];

                for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
                    const IndexType cb = index2_b[jb];
                    const ValueType vb = values_b[jb];

                    // A marker below row_beg means the column was not yet seen in this row.
                    if (marker[cb] < static_cast<SignedIndexType>(row_beg)) {
                        marker[cb] = row_end;
                        aux_index2_c[row_end] = cb;
                        aux_val_c[row_end] = va * vb;
                        ++row_end;
                    } else {
                        aux_val_c[marker[cb]] += va * vb;
                    }
                }
            }
        }
    }

    // We reorder the rows
    SortRows(c_ptr, nrows, ncols, aux_index2_c, aux_val_c);

    // We fill the matrix
    CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c);

    // Release memory
    delete[] c_ptr;
    delete[] aux_index2_c;
    delete[] aux_val_c;
}

/**
 * @brief Row-merge algorithm from Rupp et al.
 * @details The algorithm requires less memory and shows much better scalability than classic one. It is used when the number of OpenMP threads is more than 16 (see MatrixMultiplication).
* @param A The first matrix to multiply * @param B The second matrix to multiply * @param C The resulting matrix */ template <class AMatrix, class BMatrix, class CMatrix> static void MatrixMultiplicationRMerge( const AMatrix &A, const BMatrix &B, CMatrix &C ) { typedef typename value_type<CMatrix>::type ValueType; // Auxiliar sizes const SizeType nrows = A.size1(); const SizeType ncols = B.size2(); // Exiting just in case of empty matrix if ((nrows == 0) || (ncols == 0)) return void(); // Get access to A and B data const IndexType* index1_a = A.index1_data().begin(); const IndexType* index2_a = A.index2_data().begin(); const double* values_a = A.value_data().begin(); const IndexType* index1_b = B.index1_data().begin(); const IndexType* index2_b = B.index2_data().begin(); const double* values_b = B.value_data().begin(); IndexType max_row_width = 0; #pragma omp parallel { IndexType my_max = 0; #pragma omp for for(int i = 0; i < static_cast<int>(nrows); ++i) { const IndexType row_beg = index1_a[i]; const IndexType row_end = index1_a[i+1]; IndexType row_width = 0; for(IndexType j = row_beg; j < row_end; ++j) { const IndexType a_col = index2_a[j]; row_width += index1_b[a_col + 1] - index1_b[a_col]; } my_max = std::max(my_max, row_width); } #pragma omp critical max_row_width = std::max(max_row_width, my_max); } #ifdef _OPENMP const int nthreads = omp_get_max_threads(); #else const int nthreads = 1; #endif std::vector< std::vector<IndexType> > tmp_col(nthreads); std::vector< std::vector<ValueType> > tmp_val(nthreads); for(int i = 0; i < nthreads; ++i) { tmp_col[i].resize(3 * max_row_width); tmp_val[i].resize(2 * max_row_width); } // We create the c_ptr auxiliar variable IndexType* c_ptr = new IndexType[nrows + 1]; c_ptr[0] = 0; #pragma omp parallel { #ifdef _OPENMP const int tid = omp_get_thread_num(); #else const int tid = 0; #endif IndexType* t_col = &tmp_col[tid][0]; #pragma omp for for(int i = 0; i < static_cast<int>(nrows); ++i) { const IndexType row_beg = 
index1_a[i]; const IndexType row_end = index1_a[i+1]; c_ptr[i+1] = ProdRowWidth( index2_a + row_beg, index2_a + row_end, index1_b, index2_b, t_col, t_col + max_row_width, t_col + 2 * max_row_width ); } } // We initialize the sparse matrix std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr); const SizeType nonzero_values = c_ptr[nrows]; IndexType* aux_index2_c = new IndexType[nonzero_values]; ValueType* aux_val_c = new ValueType[nonzero_values]; #pragma omp parallel { #ifdef _OPENMP const int tid = omp_get_thread_num(); #else const int tid = 0; #endif IndexType* t_col = tmp_col[tid].data(); ValueType *t_val = tmp_val[tid].data(); #pragma omp for for(int i = 0; i < static_cast<int>(nrows); ++i) { const IndexType row_beg = index1_a[i]; const IndexType row_end = index1_a[i+1]; ProdRow(index2_a + row_beg, index2_a + row_end, values_a + row_beg, index1_b, index2_b, values_b, aux_index2_c + c_ptr[i], aux_val_c + c_ptr[i], t_col, t_val, t_col + max_row_width, t_val + max_row_width ); } } // We fill the matrix CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c); // Release memory delete[] c_ptr; delete[] aux_index2_c; delete[] aux_val_c; } /** * @brief The first is a method in order to sum to sparse matrices in a efficient way * @param A The resulting matrix * @param B The second matrix to sum */ template <class AMatrix, class BMatrix> static void MatrixAdd( AMatrix& A, const BMatrix& B, const double Factor = 1.0 ) { typedef typename value_type<AMatrix>::type ValueType; // Auxiliar sizes const SizeType nrows = A.size1(); const SizeType ncols = A.size2(); /* Some checks */ // Exiting just in case of empty matrix if ((nrows == 0) || (ncols == 0)) return void(); KRATOS_ERROR_IF_NOT(nrows == B.size1()) << "The second matrix has a wrong number of rows" << std::endl; KRATOS_ERROR_IF_NOT(ncols == B.size2()) << "The second matrix has a wrong number of columns" << std::endl; // Get access to A and B data const IndexType* index1_a = A.index1_data().begin(); const 
IndexType* index2_a = A.index2_data().begin(); const double* values_a = A.value_data().begin(); const IndexType* index1_b = B.index1_data().begin(); const IndexType* index2_b = B.index2_data().begin(); const double* values_b = B.value_data().begin(); IndexType* new_a_ptr = new IndexType[nrows + 1]; new_a_ptr[0] = 0; #pragma omp parallel { #pragma omp for for(int ia = 0; ia < static_cast<int>(nrows); ++ia) { SignedIndexVectorType marker(ncols); for (int i = 0; i < static_cast<int>(ncols); ++i) marker[i] = -1; // Initialize IndexType new_A_cols = 0; // Iterate over A const IndexType row_begin_a = index1_a[ia]; const IndexType row_end_a = index1_a[ia+1]; for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) { const IndexType ca = index2_a[ja]; marker[ca] = 1; ++new_A_cols; } // Iterate over B const IndexType row_begin_b = index1_b[ia]; const IndexType row_end_b = index1_b[ia+1]; for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) { const IndexType cb = index2_b[jb]; if (marker[cb] < 0) { marker[cb] = 1; ++new_A_cols; } } new_a_ptr[ia + 1] = new_A_cols; } } // We initialize the sparse matrix std::partial_sum(new_a_ptr, new_a_ptr + nrows + 1, new_a_ptr); const SizeType nonzero_values = new_a_ptr[nrows]; IndexType* aux_index2_new_a = new IndexType[nonzero_values]; ValueType* aux_val_new_a = new ValueType[nonzero_values]; #pragma omp parallel { #pragma omp for for(int ia = 0; ia < static_cast<int>(nrows); ++ia) { SignedIndexVectorType marker(ncols); for (int i = 0; i < static_cast<int>(ncols); ++i) marker[i] = -1; // Initialize const IndexType row_beg = new_a_ptr[ia]; IndexType row_end = row_beg; // Iterate over A const IndexType row_begin_a = index1_a[ia]; const IndexType row_end_a = index1_a[ia+1]; for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) { const IndexType ca = index2_a[ja]; const ValueType va = values_a[ja]; marker[ca] = row_end; aux_index2_new_a[row_end] = ca; aux_val_new_a[row_end] = va; ++row_end; } // Iterate over B const IndexType row_begin_b = 
index1_b[ia]; const IndexType row_end_b = index1_b[ia+1]; for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) { const IndexType cb = index2_b[jb]; const ValueType vb = values_b[jb]; if (marker[cb] < 0) { marker[cb] = row_end; aux_index2_new_a[row_end] = cb; aux_val_new_a[row_end] = Factor * vb; ++row_end; } else { aux_val_new_a[marker[cb]] += Factor * vb; } } } } // We reorder the rows SortRows(new_a_ptr, nrows, ncols, aux_index2_new_a, aux_val_new_a); // We fill the matrix CreateSolutionMatrix(A, nrows, ncols, new_a_ptr, aux_index2_new_a, aux_val_new_a); // Release memory delete[] new_a_ptr; delete[] aux_index2_new_a; delete[] aux_val_new_a; } /** * @brief This method computes of the transpose matrix of a given matrix * @param rA The resulting matrix * @param rB The second matrix to transpose */ template <class AMatrix, class BMatrix> static void TransposeMatrix( AMatrix& rA, const BMatrix& rB, const double Factor = 1.0 ) { typedef typename value_type<AMatrix>::type ValueType; // Get access to B data const IndexType* index1 = rB.index1_data().begin(); const IndexType* index2 = rB.index2_data().begin(); const ValueType* data = rB.value_data().begin(); const SizeType transpose_nonzero_values = rB.value_data().end() - rB.value_data().begin(); const SizeType size_system_1 = rB.size1(); const SizeType size_system_2 = rB.size2(); if (rA.size1() != size_system_2 || rA.size2() != size_system_1 ) { rA.resize(size_system_2, size_system_1, false); } IndexVectorType new_a_ptr(size_system_2 + 1); #pragma omp parallel for for (int i = 0; i < static_cast<int>(size_system_2 + 1); ++i) new_a_ptr[i] = 0; IndexVectorType aux_index2_new_a(transpose_nonzero_values); DenseVector<ValueType> aux_val_new_a(transpose_nonzero_values); #pragma omp parallel for for (int i=0; i<static_cast<int>(size_system_1); ++i) { IndexType row_begin = index1[i]; IndexType row_end = index1[i+1]; for (IndexType j=row_begin; j<row_end; j++) { #pragma omp atomic new_a_ptr[index2[j] + 1] += 1; } } // We 
initialize the blocks sparse matrix std::partial_sum(new_a_ptr.begin(), new_a_ptr.end(), &new_a_ptr[0]); IndexVectorType aux_indexes(size_system_2); #pragma omp parallel for for (int i = 0; i < static_cast<int>(size_system_2); ++i) aux_indexes[i] = 0; // #pragma omp parallel for for (int i=0; i<static_cast<int>(size_system_1); ++i) { IndexType row_begin = index1[i]; IndexType row_end = index1[i+1]; for (IndexType j=row_begin; j<row_end; j++) { const IndexType current_row = index2[j]; const IndexType initial_position = new_a_ptr[current_row]; const IndexType current_index = initial_position + aux_indexes[current_row]; aux_index2_new_a[current_index] = i; aux_val_new_a[current_index] = Factor * data[j]; // #pragma omp atomic aux_indexes[current_row] += 1; } } // We reorder the rows SortRows(&new_a_ptr[0], size_system_2, size_system_1, &aux_index2_new_a[0], &aux_val_new_a[0]); // We fill the matrix CreateSolutionMatrix(rA, size_system_2, size_system_1, &new_a_ptr[0], &aux_index2_new_a[0], &aux_val_new_a[0]); } /** * @brief This method is designed to create the final solution sparse matrix from the auxiliar values * @param C The matrix solution * @param NRows The number of rows of the matrix * @param NCols The number of columns of the matrix * @param CPtr The indexes taht indicate the number of nonzero values in each column * @param AuxIndex2C The indexes of the nonzero columns * @param AuxValC The C array containing the values of the sparse matrix */ template <class CMatrix, typename TSize, typename Ptr, typename IndexType, typename ValueType> static inline void CreateSolutionMatrix( CMatrix& C, const TSize NRows, const TSize NCols, const Ptr* CPtr, const IndexType* AuxIndex2C, const ValueType* AuxValC ) { // Exiting just in case of empty matrix if ((NRows == 0) || (NCols == 0)) return void(); // Auxiliar values const TSize nonzero_values = CPtr[NRows]; C = CMatrix(NRows, NCols, nonzero_values); IndexType* index1_c = C.index1_data().begin(); IndexType* index2_c = 
C.index2_data().begin(); double* values_c = C.value_data().begin(); index1_c[0] = 0; for (TSize i = 0; i < NRows; i++) index1_c[i+1] = index1_c[i] + (CPtr[i+1] - CPtr[i]); #pragma omp parallel for for (int i = 0; i < static_cast<int>(nonzero_values); i++) { KRATOS_DEBUG_ERROR_IF(AuxIndex2C[i] > static_cast<IndexType>(NCols)) << "Index " << AuxIndex2C[i] <<" is greater than the number of columns " << NCols << std::endl; index2_c[i] = AuxIndex2C[i]; values_c[i] = AuxValC[i]; } C.set_filled(NRows+1, nonzero_values); } /** * @brief This method is designed to reorder the rows by columns * @param NRows The number of rows of the matrix * @param NCols The number of columns of the matrix * @param CPtr The indexes taht indicate the number of nonzero values in each column * @param Columns The columns of the problem * @param Values The values (to be ordered with the rows) */ template <typename TSize, typename Col, typename TIndexType, typename ValueType> static inline void SortRows( const TIndexType* CPtr, const TSize NRows, const TSize NCols, Col* Columns, ValueType* Values ) { #pragma omp parallel { #pragma omp for for (int i_row=0; i_row<static_cast<int>(NRows); i_row++) { const TIndexType row_beg = CPtr[i_row]; const TIndexType row_end = CPtr[i_row + 1]; for(IndexType j = 1; j < row_end - row_beg; ++j) { const IndexType c = Columns[j + row_beg]; const double v = Values[j + row_beg]; SignedIndexType i = j - 1; while(i >= 0 && Columns[i + row_beg] > c) { KRATOS_DEBUG_ERROR_IF(Columns[i + row_beg] > static_cast<Col>(NCols)) << " Index for column: " << i + row_beg << ". 
Index " << Columns[i + row_beg] <<" is greater than the number of columns " << NCols << std::endl; Columns[i + 1 + row_beg] = Columns[i + row_beg]; Values[i + 1 + row_beg] = Values[i + row_beg]; i--; } Columns[i + 1 + row_beg] = c; Values[i + 1 + row_beg] = v; } } } } /** * @brief This method assembles several sparse matrices into one large sparse matrix * @param rMatricespBlocks The pointers to the matrices we are interested in assemble * @param ContributionCoefficients The matrix containing the coefficients to be considered (copy, so we don't need to provide it) * @param TransposeBlocks The matrix containing the flags telling us to transpose the blocks (copy, so we don't need to provide it) */ static inline void AssembleSparseMatrixByBlocks( CompressedMatrix& rMatrix, const DenseMatrix<CompressedMatrix*>& rMatricespBlocks, DenseMatrix<double> ContributionCoefficients = DenseMatrix<double>(0,0), DenseMatrix<bool> TransposeBlocks = DenseMatrix<bool>(0,0) ) { const SizeType number_of_rows_blocks = rMatricespBlocks.size1(); const SizeType number_of_columns_blocks = rMatricespBlocks.size2(); // Fill the matrices if they are empty if (ContributionCoefficients.size1() == 0 && ContributionCoefficients.size2() == 0) { ContributionCoefficients.resize(number_of_rows_blocks, number_of_columns_blocks); for (IndexType i = 0; i < number_of_rows_blocks; ++i) { for (IndexType j = 0; j < number_of_columns_blocks; ++j) { ContributionCoefficients(i, j) = 1.0; } } } else { KRATOS_ERROR_IF(ContributionCoefficients.size1() != number_of_rows_blocks || ContributionCoefficients.size2() != number_of_columns_blocks) << "The ContributionCoefficients dimensions" << ContributionCoefficients.size1() << " and " << ContributionCoefficients.size2() << "do not coincide with the dimensions of rMatricespBlocks" << number_of_rows_blocks << "and " << number_of_columns_blocks << std::endl; } if (TransposeBlocks.size1() == 0 && TransposeBlocks.size2() == 0) { TransposeBlocks.resize(number_of_rows_blocks, 
number_of_columns_blocks); for (IndexType i = 0; i < number_of_rows_blocks; ++i) { for (IndexType j = 0; j < number_of_rows_blocks; ++j) { TransposeBlocks(i, j) = false; } } } else { KRATOS_ERROR_IF(TransposeBlocks.size1() != number_of_rows_blocks || TransposeBlocks.size2() != number_of_columns_blocks) << "The TransposeBlocks dimensions" << TransposeBlocks.size1() << " and " << TransposeBlocks.size2() << "do not coincide with the dimensions of rMatricespBlocks" << number_of_rows_blocks << "and " << number_of_columns_blocks << std::endl; } // Compute total size and check consistency of the different blocks SizeType nrows = 0, ncols = 0; std::vector<SizeType> row_sizes(number_of_rows_blocks); std::vector<SizeType> column_sizes(number_of_columns_blocks); for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) { if (TransposeBlocks(i, 0)) { row_sizes[i] = (*rMatricespBlocks(i, 0)).size2(); } else { row_sizes[i] = (*rMatricespBlocks(i, 0)).size1(); } nrows += row_sizes[i]; } for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) { if (TransposeBlocks(0, j)) { column_sizes[j] = (*rMatricespBlocks(0, j)).size1(); } else { column_sizes[j] = (*rMatricespBlocks(0, j)).size2(); } ncols += column_sizes[j]; } // Check consistency of all blocks for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) { for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) { if (TransposeBlocks(i, j)) { KRATOS_ERROR_IF((*rMatricespBlocks(i, j)).size2() != row_sizes[i] || (*rMatricespBlocks(i, j)).size1() != column_sizes[j]) << " Not consistent size in block " << i << ", " << j << ".\t" << (*rMatricespBlocks(i, j)).size2() << ", " << (*rMatricespBlocks(i, j)).size1() << " vs " << row_sizes[i] << ", " << row_sizes[j] << std::endl; } else { KRATOS_ERROR_IF((*rMatricespBlocks(i, j)).size1() != row_sizes[i] || (*rMatricespBlocks(i, j)).size2() != column_sizes[j]) << " Not consistent size in block " << i << ", " << j << ".\t" << (*rMatricespBlocks(i, j)).size1() << ", " 
<< (*rMatricespBlocks(i, j)).size2() << " vs " << row_sizes[i] << ", " << row_sizes[j] << std::endl; } } } // Exiting just in case of empty matrix if ((nrows == 0) || (ncols == 0)) return void(); // We will compute nonzero terms IndexType* matrix_ptr = new IndexType[nrows + 1]; #pragma omp parallel for for (int i = 0; i < static_cast<int>(nrows + 1); ++i) matrix_ptr[i] = 0; #ifdef KRATOS_DEBUG IndexType check_non_zero = 0; DenseMatrix<IndexType> check_non_zero_blocks(number_of_rows_blocks, number_of_columns_blocks); for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) { for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) { check_non_zero_blocks(i, j) = 0; } } #endif #pragma omp parallel { #pragma omp for for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) { for (int k=0; k<static_cast<int>(row_sizes[i]); ++k) { IndexType matrix_cols_aux = 0; for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) { #ifdef KRATOS_DEBUG IndexType partial_matrix_cols_aux = 0; #endif // Skip if empty matrix CompressedMatrix& r_matrix = *rMatricespBlocks(i, j); if (r_matrix.nnz() > 0) { if (TransposeBlocks(i, j)) { // We compute the transposed matrix const SizeType size_system_1 = r_matrix.size1(); const SizeType size_system_2 = r_matrix.size2(); CompressedMatrix transpose(size_system_2, size_system_1); TransposeMatrix<CompressedMatrix, CompressedMatrix>(transpose, r_matrix); ComputeNonZeroBlocks(transpose, k, matrix_cols_aux); #ifdef KRATOS_DEBUG ComputeNonZeroBlocks(transpose, k, partial_matrix_cols_aux); #endif } else { ComputeNonZeroBlocks(r_matrix, k, matrix_cols_aux); #ifdef KRATOS_DEBUG ComputeNonZeroBlocks(r_matrix, k, partial_matrix_cols_aux); #endif } } #ifdef KRATOS_DEBUG check_non_zero_blocks(i, j) += partial_matrix_cols_aux; #endif } IndexType& r_matrix_ptr_value = matrix_ptr[std::accumulate(row_sizes.begin(), row_sizes.begin() + i, 0) + k + 1]; #pragma omp atomic r_matrix_ptr_value += matrix_cols_aux; #ifdef KRATOS_DEBUG #pragma omp 
atomic check_non_zero += matrix_cols_aux; #endif } } } // Auxiliar values std::partial_sum(matrix_ptr, matrix_ptr + nrows + 1, matrix_ptr); const SizeType nonzero_values = matrix_ptr[nrows]; #ifdef KRATOS_DEBUG SizeType total_nnz = 0; for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) { for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) { const SizeType block_nnz = rMatricespBlocks(i, j)->nnz(); KRATOS_ERROR_IF_NOT(check_non_zero_blocks(i, j) == block_nnz) << "Inconsistent number of non-zero values. Check 0: " << block_nnz << " vs " << check_non_zero_blocks(i, j) << ". Block: " << i << ", " << j << std::endl; total_nnz += block_nnz; } } KRATOS_ERROR_IF_NOT(check_non_zero == total_nnz) << "Inconsistent number of non-zero values. Check 1: " << total_nnz << " vs " << check_non_zero << std::endl; KRATOS_ERROR_IF_NOT(nonzero_values == total_nnz) << "Inconsistent number of non-zero values. Check 2: " << total_nnz << " vs " << nonzero_values << std::endl; #endif // Initialize matrix with the corresponding non-zero values rMatrix = CompressedMatrix(nrows, ncols, nonzero_values); // Fill the new matrix double* Matrix_values = rMatrix.value_data().begin(); IndexType* Matrix_index1 = rMatrix.index1_data().begin(); IndexType* Matrix_index2 = rMatrix.index2_data().begin(); Matrix_index1[0] = 0; for (IndexType i = 0; i < nrows; ++i) Matrix_index1[i+1] = Matrix_index1[i] + (matrix_ptr[i + 1] - matrix_ptr[i]); #pragma omp parallel { #pragma omp for for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) { for (int k=0; k<static_cast<int>(row_sizes[i]); ++k) { const IndexType row_beg = matrix_ptr[std::accumulate(row_sizes.begin(), row_sizes.begin() + i, 0) + k]; IndexType row_end = row_beg; for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) { const SizeType initial_index_column = std::accumulate(column_sizes.begin(), column_sizes.begin() + j, 0); // Skip if empty matrix CompressedMatrix& r_matrix = *rMatricespBlocks(i, j); if 
(r_matrix.nnz() > 0) { if (TransposeBlocks(i, j)) { // We compute the transposed matrix const SizeType size_system_1 = r_matrix.size1(); const SizeType size_system_2 = r_matrix.size2(); CompressedMatrix transpose(size_system_2, size_system_1); TransposeMatrix<CompressedMatrix, CompressedMatrix>(transpose, r_matrix); ComputeAuxiliarValuesBlocks(transpose, Matrix_index2, Matrix_values, k, row_end, initial_index_column, ContributionCoefficients(i, j)); } else { ComputeAuxiliarValuesBlocks(r_matrix, Matrix_index2, Matrix_values, k, row_end, initial_index_column, ContributionCoefficients(i, j)); } } } } } } // Close the matrix rMatrix.set_filled(nrows+1, nonzero_values); // Release memory delete[] matrix_ptr; } /** * @brief This is a method to check the block containing nonzero values * @param rMatrix The auxiliar block * @param CurrentRow The current row computed * @param rNonZeroColsAux2 The nonzero rows array */ static inline void ComputeNonZeroBlocks( const CompressedMatrix& rMatrix, const int CurrentRow, IndexType& rNonZeroColsAux2 ) { // Get access to aux_K data const IndexType* aux_matrix_index1 = rMatrix.index1_data().begin(); const IndexType row_begin = aux_matrix_index1[CurrentRow]; const IndexType row_end = aux_matrix_index1[CurrentRow + 1]; for (IndexType j=row_begin; j<row_end; j++) { ++rNonZeroColsAux2; } } /** * @brief This is a method to compute the contribution of the auxiliar blocks * @param AuxK The auxiliar block * @param AuxIndex2 The indexes of the non zero columns * @param AuxVals The values of the final matrix * @param CurrentRow The current row computed * @param RowEnd The last column computed * @param InitialIndexColumn The initial column index of the auxiliar block in the final matrix */ static inline void ComputeAuxiliarValuesBlocks( const CompressedMatrix& rMatrix, IndexType* AuxIndex2, double* AuxVals, const int CurrentRow, IndexType& RowEnd, const SizeType InitialIndexColumn, const double ContributionCoefficient = 1.0 ) { // Get access to 
aux_K data const double* aux_values = rMatrix.value_data().begin(); const IndexType* aux_Matrix_index1 = rMatrix.index1_data().begin(); const IndexType* aux_Matrix_index2 = rMatrix.index2_data().begin(); const IndexType aux_Matrix_row_begin = aux_Matrix_index1[CurrentRow]; const IndexType aux_Matrix_row_end = aux_Matrix_index1[CurrentRow + 1]; for (IndexType j=aux_Matrix_row_begin; j<aux_Matrix_row_end; j++) { const IndexType col_index = InitialIndexColumn + aux_Matrix_index2[j]; AuxIndex2[RowEnd] = col_index; AuxVals[RowEnd] = ContributionCoefficient * aux_values[j]; ++RowEnd; } } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const { return "SparseMatrixMultiplicationUtility"; } /// Print information about this object. void PrintInfo (std::ostream& rOStream) const { rOStream << "SparseMatrixMultiplicationUtility"; } /// Print object's data. void PrintData (std::ostream& rOStream) const { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method is oriented to merge rows * @param Column1 The index of the first matrix column * @param Column1End The last index of the first matrix column * @param Column2 The index of the second matrix column * @param Column2End The last index of the second matrix column * @param Column3 The index of the third matrix column * @return The resulting row */ template <bool TNeedOut, class TIndex> static TIndex* MergeRows( const TIndex* Column1, const TIndex* 
Column1End, const TIndex* Column2, const TIndex* Column2End, TIndex* Column3 ) { while(Column1 != Column1End && Column2 != Column2End) { TIndex c1 = *Column1; TIndex c2 = *Column2; if (c1 < c2) { if (TNeedOut) *Column3 = c1; ++Column1; } else if (c1 == c2) { if (TNeedOut) *Column3 = c1; ++Column1; ++Column2; } else { if (TNeedOut) *Column3 = c2; ++Column2; } ++Column3; } if (TNeedOut) { if (Column1 < Column1End) { return std::copy(Column1, Column1End, Column3); } else if (Column2 < Column2End) { return std::copy(Column2, Column2End, Column3); } else { return Column3; } } else { return Column3 + (Column1End - Column1) + (Column2End - Column2); } } /** * @brief This method is oriented to merge rows * @param rAlpha1 The coefficient of the first matrix * @param Column1 The index of the first matrix column * @param Column1End The last index of the first matrix column * @param Value1 The values of the first matrix * @param rAlpha2 The coefficient of the second matrix * @param Column2 The index of the second matrix column * @param Column2End The last index of the second matrix column * @param Value2 The values of the second matrix * @param Column3 The index of the third matrix column * @param Value3 The values of the third matrix * @return The resulting row */ template <class TIndex, class TValueType> static TIndex* MergeRows( const TValueType &rAlpha1, const TIndex* Column1, const TIndex* Column1End, const TValueType *Value1, const TValueType &rAlpha2, const TIndex* Column2, const TIndex* Column2End, const TValueType *Value2, TIndex* Column3, TValueType *Value3 ) { while(Column1 != Column1End && Column2 != Column2End) { TIndex c1 = *Column1; TIndex c2 = *Column2; if (c1 < c2) { ++Column1; *Column3 = c1; *Value3 = rAlpha1 * (*Value1++); } else if (c1 == c2) { ++Column1; ++Column2; *Column3 = c1; *Value3 = rAlpha1 * (*Value1++) + rAlpha2 * (*Value2++); } else { ++Column2; *Column3 = c2; *Value3 = rAlpha2 * (*Value2++); } ++Column3; ++Value3; } while(Column1 < Column1End) { 
*Column3++ = *Column1++; *Value3++ = rAlpha1 * (*Value1++); } while(Column2 < Column2End) { *Column3++ = *Column2++; *Value3++ = rAlpha2 * (*Value2++); } return Column3; } /** * @brief This method is oriented to multiply rows * @param AColumn The index of the first matrix column * @param AColumnEnd The last index of the first matrix column * @param BPtr The array constining the nonzero values per row of the second matrix * @param BColumn The index of the second matrix column * @param Column2End The last index of the second matrix column * @param Tmp1Column Indexes of the columns of first matrix * @param Tmp2Column Indexes of the columns of second matrix * @param Tmp3Column Indexes of the columns of third matrix * @return The resulting row */ template <class TIndex> static TIndex ProdRowWidth( const TIndex* AColumn, const TIndex* AColumnEnd, const TIndex* BPtr, const TIndex* BColumn, TIndex* Tmp1Column, TIndex* Tmp2Column, TIndex* Tmp3Column ) { const TIndex nrow = AColumnEnd - AColumn; /* No rows to merge, nothing to do */ if (nrow == 0) return 0; /* Single row, just copy it to output */ if (nrow == 1) return BPtr[*AColumn + 1] - BPtr[*AColumn]; /* Two rows, merge them */ if (nrow == 2) { int a1 = AColumn[0]; int a2 = AColumn[1]; return MergeRows<false>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column) - Tmp1Column; } /* Generic case (more than two rows). * * Merge rows by pairs, then merge the results together. * When merging two rows, the result is always wider (or equal). * Merging by pairs allows to work with short rows as often as possible. */ // Merge first two. TIndex a1 = *AColumn++; TIndex a2 = *AColumn++; TIndex c_col1 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column ) - Tmp1Column; // Go by pairs. 
while(AColumn + 1 < AColumnEnd) { a1 = *AColumn++; a2 = *AColumn++; TIndex c_col2 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column; if (AColumn == AColumnEnd) { return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column; } else { c_col1 = MergeRows<true>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column; std::swap(Tmp1Column, Tmp3Column); } } // Merge the tail. a2 = *AColumn; return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column; } /** * @brief This method is oriented to multiply rows * @param AColumn The index of the first matrix column * @param AColumnEnd The last index of the first matrix column * @param AValue The values of the first matrix * @param BPtr The array constining the nonzero values per row of the second matrix * @param BColumn The index of the second matrix column * @param BValue The values of the second matrix * @param OutColumn Indexes of the columns of output matrix * @param OutValue Values of the columns of output matrix * @param Tmp2Column Indexes of the columns of second matrix * @param Tmp2Value Values of the columns of second matrix * @param Tmp3Column Indexes of the columns of third matrix * @param Tmp3Value Values of the columns of third matrix * @return The resulting row */ template <class TIndex, class TValueType> static void ProdRow( const TIndex* AColumn, const TIndex* AColumnEnd, const TValueType *AValue, const TIndex* BPtr, const TIndex* BColumn, const TValueType *BValue, TIndex* OutColumn, TValueType *OutValue, TIndex* Tmp2Column, TValueType *Tmp2Value, TIndex* Tmp3Column, TValueType *Tmp3Value ) { const TIndex nrow = AColumnEnd - AColumn; /* No rows to merge, nothing to do */ if (nrow == 0) return; /* Single row, just copy it to output */ if (nrow == 1) { TIndex ac = *AColumn; 
TValueType av = *AValue; const TValueType *bv = BValue + BPtr[ac]; const TIndex* bc = BColumn + BPtr[ac]; const TIndex* be = BColumn + BPtr[ac+1]; while(bc != be) { *OutColumn++ = *bc++; *OutValue++ = av * (*bv++); } return; } /* Two rows, merge them */ if (nrow == 2) { TIndex ac1 = AColumn[0]; TIndex ac2 = AColumn[1]; TValueType av1 = AValue[0]; TValueType av2 = AValue[1]; MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], OutColumn, OutValue ); } /* Generic case (more than two rows). * * Merge rows by pairs, then merge the results together. * When merging two rows, the result is always wider (or equal). * Merging by pairs allows to work with short rows as often as possible. */ // Merge first two. TIndex ac1 = *AColumn++; TIndex ac2 = *AColumn++; TValueType av1 = *AValue++; TValueType av2 = *AValue++; TIndex* tm1_col = OutColumn; TValueType *tm1_val = OutValue; TIndex c_col1 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], tm1_col, tm1_val ) - tm1_col; // Go by pairs. while(AColumn + 1 < AColumnEnd) { ac1 = *AColumn++; ac2 = *AColumn++; av1 = *AValue++; av2 = *AValue++; TIndex c_col2 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp2Column, Tmp2Value ) - Tmp2Column; c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, amgcl::math::identity<TValueType>(), Tmp2Column, Tmp2Column + c_col2, Tmp2Value, Tmp3Column, Tmp3Value ) - Tmp3Column; std::swap(Tmp3Column, tm1_col); std::swap(Tmp3Value, tm1_val); } // Merge the tail if there is one. 
if (AColumn < AColumnEnd) { ac2 = *AColumn++; av2 = *AValue++; c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp3Column, Tmp3Value ) - Tmp3Column; std::swap(Tmp3Column, tm1_col); std::swap(Tmp3Value, tm1_val); } // If we are lucky, tm1 now points to out. // Otherwise, copy the results. if (tm1_col != OutColumn) { std::copy(tm1_col, tm1_col + c_col1, OutColumn); std::copy(tm1_val, tm1_val + c_col1, OutValue); } return; } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class SparseMatrixMultiplicationUtility ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ // /****************************** INPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::istream& operator >> (std::istream& rIStream, // SparseMatrixMultiplicationUtility& rThis); // // /***************************** OUTPUT STREAM FUNCTION ******************************/ // /***********************************************************************************/ // // template<class TPointType, class TPointerType> // inline std::ostream& operator << (std::ostream& rOStream, // const SparseMatrixMultiplicationUtility& rThis) // { // return rOStream; // } ///@} } // namespace Kratos. #endif // KRATOS_TREE_CONTACT_SEARCH_H_INCLUDED defined
palindrome_linear.c
#include <ctype.h>
#include <omp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_LEN 32

/* Dictionary of words read from the input file. */
char dictionary[(1 << 15)][MAX_LEN];
int words_cnt;

/*
 * Return true when the reversal of `query` appears in the dictionary.
 * Linear scan over all stored words: O(words_cnt) strcmp calls per query.
 */
bool query_string(const char* query) {
  char reversed[MAX_LEN] = {'\0'};
  size_t len = strlen(query);
  for (size_t i = 0; i < len; i++) {
    reversed[i] = query[len - 1 - i];
  }
  for (int i = 0; i < words_cnt; i++) {
    if (strcmp(dictionary[i], reversed) == 0) {
      return true;
    }
  }
  return false;
}

/*
 * Read a word list, then print (to OUTPUT_FILE) every word whose reversal
 * also appears in the list, searching the words in parallel.
 */
int main(int argc, char* argv[]) {
  if (argc != 4) {
    fprintf(stderr, "Usage: %s THREAD_NUM INPUT_FILE OUTPUT_FILE\n", argv[0]);
    return 1;
  }

  const int THREAD_NUM = atoi(argv[1]);
  const char* INPUT_PATH = argv[2];
  const char* OUTPUT_PATH = argv[3];

  FILE *input, *output;
  double start, end;

  if ((input = fopen(INPUT_PATH, "r")) == NULL) {
    fprintf(stderr, "Error while opening file %s\n", INPUT_PATH);
    return 1;
  }
  if ((output = fopen(OUTPUT_PATH, "w")) == NULL) {
    fprintf(stderr, "Error while opening file %s\n", OUTPUT_PATH);
    fclose(input); /* BUG FIX: input stream leaked on this error path. */
    return 1;
  }

  /* BUG FIX: THREAD_NUM was parsed and reported but never applied, so the
   * parallel loop always ran with the runtime default thread count. */
  omp_set_num_threads(THREAD_NUM);

  start = omp_get_wtime();

  while (fgets(dictionary[words_cnt], MAX_LEN, input) != NULL) {
    /* BUG FIX: the original compared `line + len - 2` with "\r\n", which
     * (a) reads out of bounds when the line is shorter than 2 characters,
     * and (b) never strips a bare '\n' (Unix line endings), leaving the
     * newline inside every word and defeating all strcmp matches.
     * strcspn truncates at the first '\r' or '\n' safely for any length. */
    dictionary[words_cnt][strcspn(dictionary[words_cnt], "\r\n")] = '\0';
    if (strlen(dictionary[words_cnt]) > 0) {
      words_cnt++;
    }
  }

  /* Each fprintf call locks the stream, so individual lines are written
   * atomically; the order of lines across threads is unspecified (this
   * matches the original behavior). */
  #pragma omp parallel for
  for (int i = 0; i < words_cnt; i++) {
    if (query_string(dictionary[i])) {
      fprintf(output, "%s\n", dictionary[i]);
    }
  }

  end = omp_get_wtime();
  printf("Configuration: %d threads\tTime: %f\n", THREAD_NUM, end - start);

  fclose(input);
  fclose(output);
  return 0;
}
teams_notarget_get_num_teams.c
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>

#define N 10000
#define MAX_TEAMS 64

/*
 * Host `teams distribute` conformance check: for each requested team count,
 * run a distributed loop, verify the array was filled correctly, and verify
 * that omp_get_num_teams() reports either the requested count or MAX_TEAMS
 * (the implementation cap) when more teams were requested.
 * Returns the number of errors found in the last configuration (0 = pass),
 * matching the original program's exit-status convention.
 */
int main() {
  int n = N;
  int cur_teams;
  int teams_sizes[10] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512};
  int *a = (int *)malloc(n * sizeof(int));
  int err = 0;

  /* BUG FIX: the allocation result was never checked before use. */
  if (a == NULL) {
    fprintf(stderr, "malloc failed\n");
    return 1;
  }

  for (int j = 0; j < 10; j++) {
    cur_teams = 0;
    /* Every team writes the same omp_get_num_teams() value to cur_teams;
     * formally a data race, preserved as in the original test. */
    #pragma omp teams distribute num_teams(teams_sizes[j])
    for (int i = 0; i < n; i++) {
      cur_teams = omp_get_num_teams();
      a[i] = i;
    }

    err = 0;
    for (int i = 0; i < n; i++) {
      if (a[i] != i) {
        printf("Error at %d: a = %d, should be %d\n", i, a[i], i);
        err++;
        if (err > 10) break;
      }
    }

    // If we have bigger number than MAX_TEAMS in num_teams() clause we will
    // get omp_get_num_teams() as MAX_TEAMS.
    /* BUG FIX: the original condition
     *   ((cur_teams > MAX_TEAMS) && (cur_teams != MAX_TEAMS)) && (cur_teams != teams_sizes[j])
     * contains a tautologically redundant conjunct (x > 64 implies x != 64),
     * so a request of <= MAX_TEAMS teams that was not honored was never
     * flagged. Per the comment above, the reported count must match either
     * the request or the MAX_TEAMS cap. */
    if ((cur_teams != MAX_TEAMS) && (cur_teams != teams_sizes[j])) {
      printf("omp_get_num_teams() : %d but we tried to set num_teams(%d)\n",
             cur_teams, teams_sizes[j]);
      err++;
    }
  }

  free(a); /* BUG FIX: the allocation was leaked. */
  return err;
}
J2OrbitalSoA.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. // Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp. // Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. ////////////////////////////////////////////////////////////////////////////////////// // -*- C++ -*- #ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H #define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H #include "Configuration.h" #if !defined(QMC_BUILD_SANDBOX_ONLY) #include "QMCWaveFunctions/WaveFunctionComponent.h" #include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h" #include <qmc_common.h> #endif #include "Particle/DistanceTableData.h" #include <simd/allocator.hpp> #include <simd/algorithm.hpp> #include <map> #include <numeric> namespace qmcplusplus { /** @ingroup WaveFunctionComponent * @brief Specialization for two-body Jastrow function using multiple functors * * Each pair-type can have distinct function \f$u(r_{ij})\f$. * For electrons, distinct pair correlation functions are used * for spins up-up/down-down and up-down/down-up. * * Based on J2OrbitalSoA.h with these considerations * - DistanceTableData using SoA containers * - support mixed precision: FT::real_type != OHMMS_PRECISION * - loops over the groups: elminated PairID * - support simd function * - double the loop counts * - Memory use is O(N). 
*/ template<class FT> struct J2OrbitalSoA : public WaveFunctionComponent { ///alias FuncType using FuncType = FT; ///type of each component U, dU, d2U; using valT = typename FT::real_type; ///element position type using posT = TinyVector<valT, OHMMS_DIM>; ///use the same container using RowContainer = DistanceTableData::RowContainer; ///number of particles size_t N; ///number of particles + padded size_t N_padded; ///number of groups of the target particleset size_t NumGroups; ///Used to compute correction bool FirstTime; ///diff value RealType DiffVal; ///Correction RealType KEcorr; ///\f$Uat[i] = sum_(j) u_{i,j}\f$ Vector<valT> Uat; ///\f$dUat[i] = sum_(j) du_{i,j}\f$ using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>; gContainer_type dUat; ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$ Vector<valT> d2Uat; valT cur_Uat; aligned_vector<valT> cur_u, cur_du, cur_d2u; aligned_vector<valT> old_u, old_du, old_d2u; aligned_vector<valT> DistCompressed; aligned_vector<int> DistIndice; ///Container for \f$F[ig*NumGroups+jg]\f$ std::vector<FT*> F; ///Uniquue J2 set for cleanup std::map<std::string, FT*> J2Unique; /// e-e table ID const int my_table_ID_; J2OrbitalSoA(ParticleSet& p, int tid); J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete; ~J2OrbitalSoA(); /* initialize storage */ void init(ParticleSet& p); /** add functor for (ia,ib) pair */ void addFunc(int ia, int ib, FT* j); void resetTargetParticleSet(ParticleSet& P) { if (dPsi) dPsi->resetTargetParticleSet(P); } /** check in an optimizable parameter * @param o a super set of optimizable variables */ void checkInVariables(opt_variables_type& active) { myVars.clear(); typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end()); while (it != it_end) { (*it).second->checkInVariables(active); (*it).second->checkInVariables(myVars); ++it; } } /** check out optimizable variables */ void checkOutVariables(const opt_variables_type& active) { myVars.getIndex(active); Optimizable = 
myVars.is_optimizable(); typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end()); while (it != it_end) { (*it).second->checkOutVariables(active); ++it; } if (dPsi) dPsi->checkOutVariables(active); } ///reset the value of all the unique Two-Body Jastrow functions void resetParameters(const opt_variables_type& active) { if (!Optimizable) return; typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end()); while (it != it_end) { (*it).second->resetParameters(active); ++it; } if (dPsi) dPsi->resetParameters(active); for (int i = 0; i < myVars.size(); ++i) { int ii = myVars.Index[i]; if (ii >= 0) myVars[i] = active[ii]; } } /** print the state, e.g., optimizables */ void reportStatus(std::ostream& os) { typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end()); while (it != it_end) { (*it).second->myVars.print(os); ++it; } ChiesaKEcorrection(); } RealType ChiesaKEcorrection() { return RealType(); } /**@} */ WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const; RealType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L); void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi); /** recompute internal data assuming distance table is fully ready */ void recompute(ParticleSet& P); ValueType ratio(ParticleSet& P, int iat); void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios) { for (int k = 0; k < ratios.size(); ++k) ratios[k] = std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).Distances[k])); } void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios); GradType evalGrad(ParticleSet& P, int iat); ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat); void acceptMove(ParticleSet& P, int iat); inline void restore(int iat) {} /** compute G and L after the sweep */ void evaluateGL(ParticleSet& P, ParticleSet::ParticleGradient_t& 
G, ParticleSet::ParticleLaplacian_t& L, bool fromscratch = false); inline void registerData(ParticleSet& P, WFBufferType& buf) { if (Bytes_in_WFBuffer == 0) { Bytes_in_WFBuffer = buf.current(); buf.add(Uat.begin(), Uat.end()); buf.add(dUat.data(), dUat.end()); buf.add(d2Uat.begin(), d2Uat.end()); Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer; // free local space Uat.free(); dUat.free(); d2Uat.free(); } else { buf.forward(Bytes_in_WFBuffer); } } inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf) { Uat.attachReference(buf.lendReference<valT>(N), N); dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM)); d2Uat.attachReference(buf.lendReference<valT>(N), N); } RealType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) { evaluateGL(P, P.G, P.L, false); buf.forward(Bytes_in_WFBuffer); return LogValue; } /*@{ internal compute engines*/ inline valT computeU(const ParticleSet& P, int iat, const RealType* restrict dist) { valT curUat(0); const int igt = P.GroupID[iat] * NumGroups; for (int jg = 0; jg < NumGroups; ++jg) { const FuncType& f2(*F[igt + jg]); int iStart = P.first(jg); int iEnd = P.last(jg); curUat += f2.evaluateV(iat, iStart, iEnd, dist, DistCompressed.data()); } return curUat; } inline void computeU3(const ParticleSet& P, int iat, const RealType* restrict dist, RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle = false); /** compute gradient */ inline posT accumulateG(const valT* restrict du, const RowContainer& displ) const { posT grad; for (int idim = 0; idim < OHMMS_DIM; ++idim) { const valT* restrict dX = displ.data(idim); valT s = valT(); #pragma omp simd reduction(+ : s) aligned(du, dX) for (int jat = 0; jat < N; ++jat) s += du[jat] * dX[jat]; grad[idim] = s; } return grad; } /**@} */ }; template<typename FT> J2OrbitalSoA<FT>::J2OrbitalSoA(ParticleSet& p, int tid) : my_table_ID_(p.addTable(p, DT_SOA)) { init(p); FirstTime = true; KEcorr = 0.0; ClassName = 
"J2OrbitalSoA"; } template<typename FT> J2OrbitalSoA<FT>::~J2OrbitalSoA() { auto it = J2Unique.begin(); while (it != J2Unique.end()) { delete ((*it).second); ++it; } } //need to clean up J2Unique template<typename FT> void J2OrbitalSoA<FT>::init(ParticleSet& p) { N = p.getTotalNum(); N_padded = getAlignedSize<valT>(N); NumGroups = p.groups(); Uat.resize(N); dUat.resize(N); d2Uat.resize(N); cur_u.resize(N); cur_du.resize(N); cur_d2u.resize(N); old_u.resize(N); old_du.resize(N); old_d2u.resize(N); F.resize(NumGroups * NumGroups, nullptr); DistCompressed.resize(N); DistIndice.resize(N); } template<typename FT> void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j) { if (ia == ib) { if (ia == 0) //first time, assign everything { int ij = 0; for (int ig = 0; ig < NumGroups; ++ig) for (int jg = 0; jg < NumGroups; ++jg, ++ij) if (F[ij] == nullptr) F[ij] = j; } else F[ia * NumGroups + ib] = j; } else { if (N == 2) { // a very special case, 1 up + 1 down // uu/dd was prevented by the builder for (int ig = 0; ig < NumGroups; ++ig) for (int jg = 0; jg < NumGroups; ++jg) F[ig * NumGroups + jg] = j; } else { // generic case F[ia * NumGroups + ib] = j; F[ib * NumGroups + ia] = j; } } std::stringstream aname; aname << ia << ib; J2Unique[aname.str()] = j; //ChiesaKEcorrection(); FirstTime = false; } template<typename FT> WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const { J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(tqp, -1); if (dPsi) j2copy->dPsi = dPsi->makeClone(tqp); std::map<const FT*, FT*> fcmap; for (int ig = 0; ig < NumGroups; ++ig) for (int jg = ig; jg < NumGroups; ++jg) { int ij = ig * NumGroups + jg; if (F[ij] == 0) continue; typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F[ij]); if (fit == fcmap.end()) { FT* fc = new FT(*F[ij]); j2copy->addFunc(ig, jg, fc); //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc); fcmap[F[ij]] = fc; } } j2copy->Optimizable = Optimizable; return j2copy; } /** intenal function to compute 
\f$\sum_j u(r_j), du/dr, d2u/dr2\f$ * @param P particleset * @param iat particle index * @param dist starting distance * @param u starting value * @param du starting first deriv * @param d2u starting second deriv */ template<typename FT> inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P, int iat, const RealType* restrict dist, RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle) { const int jelmax = triangle ? iat : N; constexpr valT czero(0); std::fill_n(u, jelmax, czero); std::fill_n(du, jelmax, czero); std::fill_n(d2u, jelmax, czero); const int igt = P.GroupID[iat] * NumGroups; for (int jg = 0; jg < NumGroups; ++jg) { const FuncType& f2(*F[igt + jg]); int iStart = P.first(jg); int iEnd = std::min(jelmax, P.last(jg)); f2.evaluateVGL(iat, iStart, iEnd, dist, u, du, d2u, DistCompressed.data(), DistIndice.data()); } //u[iat]=czero; //du[iat]=czero; //d2u[iat]=czero; } template<typename FT> typename J2OrbitalSoA<FT>::ValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat) { //only ratio, ready to compute it again UpdateMode = ORB_PBYP_RATIO; cur_Uat = computeU(P, iat, P.getDistTable(my_table_ID_).Temp_r.data()); return std::exp(Uat[iat] - cur_Uat); } template<typename FT> inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios) { const auto& d_table = P.getDistTable(my_table_ID_); const auto* restrict dist = d_table.Temp_r.data(); for (int ig = 0; ig < NumGroups; ++ig) { const int igt = ig * NumGroups; valT sumU(0); for (int jg = 0; jg < NumGroups; ++jg) { const FuncType& f2(*F[igt + jg]); int iStart = P.first(jg); int iEnd = P.last(jg); sumU += f2.evaluateV(-1, iStart, iEnd, dist, DistCompressed.data()); } for (int i = P.first(ig); i < P.last(ig); ++i) { // remove self-interaction const valT Uself = F[igt + ig]->evaluate(dist[i]); ratios[i] = std::exp(Uat[i] + Uself - sumU); } } } template<typename FT> typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& 
P, int iat) { return GradType(dUat[iat]); } template<typename FT> typename J2OrbitalSoA<FT>::ValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat) { UpdateMode = ORB_PBYP_PARTIAL; computeU3(P, iat, P.getDistTable(my_table_ID_).Temp_r.data(), cur_u.data(), cur_du.data(), cur_d2u.data()); cur_Uat = simd::accumulate_n(cur_u.data(), N, valT()); DiffVal = Uat[iat] - cur_Uat; grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).Temp_dr); return std::exp(DiffVal); } template<typename FT> void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat) { // get the old u, du, d2u const auto& d_table = P.getDistTable(my_table_ID_); computeU3(P, iat, d_table.Distances[iat], old_u.data(), old_du.data(), old_d2u.data()); if (UpdateMode == ORB_PBYP_RATIO) { //ratio-only during the move; need to compute derivatives const auto* restrict dist = d_table.Temp_r.data(); computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data()); } valT cur_d2Uat(0); const auto& new_dr = d_table.Temp_dr; const auto& old_dr = d_table.Displacements[iat]; constexpr valT lapfac = OHMMS_DIM - RealType(1); #pragma omp simd reduction(+ : cur_d2Uat) for (int jat = 0; jat < N; jat++) { const valT du = cur_u[jat] - old_u[jat]; const valT newl = cur_d2u[jat] + lapfac * cur_du[jat]; const valT dl = old_d2u[jat] + lapfac * old_du[jat] - newl; Uat[jat] += du; d2Uat[jat] += dl; cur_d2Uat -= newl; } posT cur_dUat; for (int idim = 0; idim < OHMMS_DIM; ++idim) { const valT* restrict new_dX = new_dr.data(idim); const valT* restrict old_dX = old_dr.data(idim); const valT* restrict cur_du_pt = cur_du.data(); const valT* restrict old_du_pt = old_du.data(); valT* restrict save_g = dUat.data(idim); valT cur_g = cur_dUat[idim]; #pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt) for (int jat = 0; jat < N; jat++) { const valT newg = cur_du_pt[jat] * new_dX[jat]; const valT dg = newg - old_du_pt[jat] * old_dX[jat]; save_g[jat] -= dg; cur_g 
+= newg; } cur_dUat[idim] = cur_g; } LogValue += Uat[iat] - cur_Uat; Uat[iat] = cur_Uat; dUat(iat) = cur_dUat; d2Uat[iat] = cur_d2Uat; } template<typename FT> void J2OrbitalSoA<FT>::recompute(ParticleSet& P) { const auto& d_table = P.getDistTable(my_table_ID_); for (int ig = 0; ig < NumGroups; ++ig) { const int igt = ig * NumGroups; for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat) { computeU3(P, iat, d_table.Distances[iat], cur_u.data(), cur_du.data(), cur_d2u.data(), true); Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT()); posT grad; valT lap(0); const valT* restrict u = cur_u.data(); const valT* restrict du = cur_du.data(); const valT* restrict d2u = cur_d2u.data(); const RowContainer& displ = d_table.Displacements[iat]; constexpr valT lapfac = OHMMS_DIM - RealType(1); #pragma omp simd reduction(+ : lap) aligned(du, d2u) for (int jat = 0; jat < iat; ++jat) lap += d2u[jat] + lapfac * du[jat]; for (int idim = 0; idim < OHMMS_DIM; ++idim) { const valT* restrict dX = displ.data(idim); valT s = valT(); #pragma omp simd reduction(+ : s) aligned(du, dX) for (int jat = 0; jat < iat; ++jat) s += du[jat] * dX[jat]; grad[idim] = s; } dUat(iat) = grad; d2Uat[iat] = -lap; // add the contribution from the upper triangle #pragma omp simd aligned(u, du, d2u) for (int jat = 0; jat < iat; jat++) { Uat[jat] += u[jat]; d2Uat[jat] -= d2u[jat] + lapfac * du[jat]; } for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict save_g = dUat.data(idim); const valT* restrict dX = displ.data(idim); #pragma omp simd aligned(save_g, du, dX) for (int jat = 0; jat < iat; jat++) save_g[jat] -= du[jat] * dX[jat]; } } } } template<typename FT> typename J2OrbitalSoA<FT>::RealType J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L) { evaluateGL(P, G, L, true); return LogValue; } template<typename FT> void J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P, ParticleSet::ParticleGradient_t& G, 
ParticleSet::ParticleLaplacian_t& L, bool fromscratch) { if (fromscratch) recompute(P); LogValue = valT(0); for (int iat = 0; iat < N; ++iat) { LogValue += Uat[iat]; G[iat] += dUat[iat]; L[iat] += d2Uat[iat]; } constexpr valT mhalf(-0.5); LogValue = mhalf * LogValue; } template<typename FT> void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi) { LogValue = 0.0; const DistanceTableData& d_ee(P.getDistTable(my_table_ID_)); valT dudr, d2udr2; Tensor<valT, DIM> ident; grad_grad_psi = 0.0; ident.diagonal(1.0); for (int i=1; i<N; ++i) { const valT* dist = d_ee.Distances[i]; const RowContainer& displ = d_ee.Displacements[i]; auto ig = P.GroupID[i]; const int igt = ig * NumGroups; for (int j = 0; j < i; ++j) { auto r = dist[j]; auto rinv = 1.0 / r; auto dr = displ[j]; auto jg = P.GroupID[j]; auto uij = F[igt + jg]->evaluate(r, dudr, d2udr2); LogValue -= uij; auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv; grad_grad_psi[i] -= hess; grad_grad_psi[j] -= hess; } } } } // namespace qmcplusplus #endif
kernel.openmp.h
#include <iris/iris_openmp.h> static void saxpy(float* Z, float A, float* X, float* Y, IRIS_OPENMP_KERNEL_ARGS) { size_t i; #pragma omp parallel for shared(Z, A, X, Y) private(i) IRIS_OPENMP_KERNEL_BEGIN(i) Z[i] = A * X[i] + Y[i]; IRIS_OPENMP_KERNEL_END }
geometric_distortion.h
/* * Software License Agreement * * Point to plane metric for point cloud distortion measurement * Copyright (c) 2016, MERL * * All rights reserved. * * Contributors: * Dong Tian <tian@merl.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the copyright holder(s) nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #ifndef GEOMETRIC_DISTORTION_HPP #define GEOMETRIC_DISTORTION_HPP #include <pcl/io/ply_io.h> #include <pcl/common/common.h> #include <pcl/point_types.h> #include <pcl/search/kdtree.h> #include <pcl/features/normal_3d_omp.h> #include <mutex> using namespace std; using namespace pcl; using namespace pcl::io; using namespace pcl::console; using namespace pcl::search; namespace pcl { namespace geometric_quality { class commandPar { public: string file1; string file2; string normFile; //! output the normals to this file float rtimes; //! \times the minimum distance of nearest neighbor float radius; //! radius to estimate normals. to be derived based on rtimes float BBsize; //! Maximum Bounding Box length of point cloud, the scale of two point cloud need to be same int knn; //! knn method to do normal estimation bool force; //! Force to do normal estimation, even if the normals provided in the input bool singlePass; //! Force to run a single pass algorithm. where the loop is over the original point cloud bool hausdorff; //! true: output hausdorff metric as well bool c2c_only; //! skip point-to-plane metric commandPar() { file1 = ""; file2 = ""; normFile = ""; rtimes = -1.0; radius = 1.0; BBsize = 1.0; knn = 0; force = false; singlePass = false; hausdorff = false; c2c_only = false; } }; /**! * \brief * Store the quality metric for point to plane measurements */ class qMetric { public: // point-2-point ( cloud 2 cloud ), benchmark metric float c2c_rms; //! store symm rms metric float c2c_hausdorff; //! store symm haussdorf float c2c_psnr; float c2c_hausdorff_psnr; //! store symm haussdorf // point-2-plane ( cloud 2 plane ), proposed metric float c2p_rms; //! store symm rms metric float c2p_hausdorff; //! store symm haussdorf float c2p_psnr; float c2p_hausdorff_psnr; //! store symm haussdorf // point 2 plane ( cloud 2 plane ), proposed metric float maxDist; //! 
maximum distnace between NN points in reference point cloud qMetric() { c2c_rms = 0; c2c_hausdorff = 0; c2p_rms = 0; c2p_hausdorff = 0; } }; /* * \brief load a point cloud * \param[in] file_name: the name of the file to load * \param[out] cloud: the resultant templated point cloud */ template<typename PointT> int readcloud(const string &file_name, PointCloud<PointT> &cloud) { int(*readfunc)( const string &, PointCloud<PointT> & ); std::string suffix; suffix = file_name.substr(file_name.find_last_of(".") + 1); if (suffix == "pcd") readfunc = io::loadPCDFile; else if (suffix == "ply") readfunc = io::loadPLYFile; else { cerr << "Error: File " << file_name << " doesn't have a valid suffix" << endl; return -1; } return readfunc(file_name.c_str(), cloud); } /* * \brief write a point cloud into file * \param[in] file_name: the name of the file to load * \param[out] cloud: the resultant templated point cloud */ template<typename PointT> int writecloud(const string &file_name, PointCloud<PointT> &cloud) { int(*savefunc)( const string &, const pcl::PointCloud<PointT> &, bool ); string suffix; suffix = file_name.substr(file_name.find_last_of(".") + 1); if (suffix == "pcd") { savefunc = pcl::io::savePCDFile; } else if (suffix == "ply") { savefunc = pcl::io::savePLYFile; } else { cout << "Error: File " << file_name.c_str() << " doesn't have a valid suffix" << endl; return -1; } return savefunc(file_name.c_str(), cloud, false); } /**! 
* \function * Compute the minimum and maximum NN distances, find out the * intrinsic resolutions * \parameters * @param cloudA: point cloud * @param minDist: output * @param maxDist: output * \note * PointT typename of point used in point cloud * \author * Dong Tian, MERL */ template<typename PointT> void findNNdistances(PointCloud<PointT> &cloudA, float &minDist, float &maxDist) { maxDist = numeric_limits<float>::min(); minDist = numeric_limits<float>::max(); double distTmp = 0; mutex myMutex; search::KdTree<PointT> treeA; treeA.setInputCloud(cloudA.makeShared()); #pragma omp parallel for for (size_t i = 0; i < cloudA.points.size(); ++i) { std::vector<int> indices; std::vector<float> sqrDist; int nFound = treeA.nearestKSearch(cloudA.points[i], 2, indices, sqrDist); if ( nFound <= 0) cerr << "Error! No NN found!" << endl; if (indices[0] != i || sqrDist[1] <= 0.0000000001) { // Maybe print some warnings // cerr << "Error! nFound = " << nFound << ", i, iFound = " << i << ", " << indices[0] << ", " << indices[1] << endl; // cerr << " Distances = " << sqrDist[0] << ", " << sqrDist[1] << endl; // cerr << " Some points are repeated!" << endl; } else { // Use the second one. assume the first one is the current point myMutex.lock(); distTmp = sqrt( sqrDist[1] ); if (distTmp > maxDist) maxDist = distTmp; if (distTmp < minDist) minDist = distTmp; myMutex.unlock(); } } } /**! 
* \function * Convert the MSE error to PSNR numbers * \parameters * @param cloudA: the original point cloud * @param dist: the distortion * @param p: the peak value for conversion * \return * psnr value * \note * PointT typename of point used in point cloud * \author * Dong Tian, MERL */ template<typename PointT> float getPSNR(PointCloud<PointT> &cloudA, float dist, float p) { // @DT: If bounding box is wanted for the peak value // PointT pMinA, pMaxA; // getMinMax3D( cloudA, pMinA, pMaxA ); // metric.maxDist = pMaxA; float max_energy = p * p; float psnr = 10 * log10( max_energy / (dist*dist) ); return psnr; } /**! * \function * Check if meaningful normals exist. * \parameters * @param cloudA: the original point cloud * \return * true: normals are available * false: otherwise * \note * PointT typename of point used in point cloud * \author * Dong Tian, MERL */ template<typename PointT> bool checkNormalsAvailability(PointCloud<PointT> &cloudA) { size_t sz = cloudA.points.size(); size_t i = 0; if (cloudA.at(i).normal_x != 0 && cloudA.at(i).normal_x != 0 && cloudA.at(i).normal_x != 0 ) return true; i = sz - 1; if (cloudA.at(i).normal_x != 0 && cloudA.at(i).normal_x != 0 && cloudA.at(i).normal_x != 0 ) return true; i = sz / 2 - 1; if (cloudA.at(i).normal_x != 0 && cloudA.at(i).normal_x != 0 && cloudA.at(i).normal_x != 0 ) return true; return false; } /**! * \function * Derive the normals for the decoded point cloud based on the * normals in the original point cloud * \parameters * @param cloudA: the original point cloud * @param cloudNormalsA: the normals in the original point cloud * @param cloudB: the decoded point cloud * @param cloudNormalsB: the normals in the original point * cloud. 
Output parameter
* \note
*   PointT typename of point used in point cloud
* \author
*   Dong Tian, MERL
*/
template<typename PointT>
void scaleNormals(PointCloud<PointT> &cloudA, PointCloud<Normal>::Ptr &cloudNormalsA, PointCloud<PointT> &cloudB, vector< vector<float> > &cloudNormalsB)
{
  // Prepare the buffer to compute the average normals
  clock_t t1 = clock();
  vector< vector<int> > vecMap( cloudB.points.size() );
  for (size_t i = 0; i < cloudB.points.size(); i++)
  {
    cloudNormalsB[i].push_back(0.0); // x
    cloudNormalsB[i].push_back(0.0); // y
    cloudNormalsB[i].push_back(0.0); // z
    vecMap[i].clear();
  }

  // sum up
  search::KdTree<PointT> treeA;
  treeA.setInputCloud (cloudA.makeShared());
  search::KdTree<PointT> treeB;
  treeB.setInputCloud (cloudB.makeShared());

  // Pass 1: each point of A with a valid (non-NaN) normal votes that normal
  // onto its nearest neighbor in B; vecMap records who voted where.
  for (size_t i = 0; i < cloudA.points.size(); i++)
  {
    // Find the NNs in cloudA
    vector<int> indices;
    vector<float> sqrDist;
    float nX, nY, nZ;   // NOTE(review): nX/nY/nZ and nCount are assigned but never read in this loop
    int nCount;
    treeB.nearestKSearch(cloudA.points[i], 1, indices, sqrDist);
    nX = nY = nZ = 0.0;
    nCount = 0;
    if ( !isnan(cloudNormalsA->at(i).normal_x) && !isnan(cloudNormalsA->at(i).normal_y) && !isnan(cloudNormalsA->at(i).normal_z) )
    {
      cloudNormalsB[indices[0]][0] += cloudNormalsA->at( i ).normal_x;
      cloudNormalsB[indices[0]][1] += cloudNormalsA->at( i ).normal_y;
      cloudNormalsB[indices[0]][2] += cloudNormalsA->at( i ).normal_z;
      vecMap[ indices[0] ].push_back( i );
    }
  }

  // average now
  // Pass 2: points of B that received votes get the average; points with no
  // votes fall back to the normal of their nearest neighbor in A.
  for (size_t i = 0; i < cloudB.points.size(); i++)
  {
    int nCount = vecMap[i].size();
    if (nCount > 0) // main branch
    {
      cloudNormalsB[i][0] = cloudNormalsB[i][0] / nCount;
      cloudNormalsB[i][1] = cloudNormalsB[i][1] / nCount;
      cloudNormalsB[i][2] = cloudNormalsB[i][2] / nCount;
    }
    else
    {
      vector<int> indices;
      vector<float> sqrDist;
      treeA.nearestKSearch(cloudB.points[i], 1, indices, sqrDist);
      if ( !isnan(cloudNormalsA->at(indices[0]).normal_x) && !isnan(cloudNormalsA->at(indices[0]).normal_y) && !isnan(cloudNormalsA->at(indices[0]).normal_z) )
      {
        cloudNormalsB[i][0] = cloudNormalsA->at( indices[0] ).normal_x;
        cloudNormalsB[i][1] = cloudNormalsA->at( indices[0] ).normal_y;
        cloudNormalsB[i][2] = cloudNormalsA->at( indices[0] ).normal_z;
      }
      else
      {
        // Should never comes here. The code just for completeness
        cloudNormalsB[i][0] = 0;
        cloudNormalsB[i][1] = 0;
        cloudNormalsB[i][2] = 0;
      }
    }
  }
  clock_t t2 = clock();
  // NOTE(review): clock_t integer division truncates to whole seconds here.
  cout << " Converting normal vector DONE. It takes " << (t2-t1)/CLOCKS_PER_SEC << " seconds (in CPU time)." << endl;
}

/**!
* \function
*   Get the normals for the original point cloud, either by importing
*   or estimation
* \parameters
*   @param cloudA: the original point cloud
*   @param normFile: the file name to store the normals
*   @param cPar: input parameter from command line
*   @param cloudNormals: output parameter for the normals
* \note
*   PointT typename of point used in point cloud
* \author
*   Dong Tian, MERL
*/
template<typename PointT>
void getNormals(PointCloud<PointT> &cloudA, string &normFile, commandPar &cPar, PointCloud<Normal>::Ptr cloudNormals)
{
  // Reuse normals already present in the input unless --force is given.
  if (!cPar.force && checkNormalsAvailability( cloudA ) )
  {
    cout << " Import existing normal from input" << endl;
    copyPointCloud( cloudA, *cloudNormals );
    cout << " Normal importing on original point cloud DONE!" << endl;
    return;
  }

  clock_t t1;
  t1=clock();

  if (cPar.knn == 0)
  {
    // Step 0 ------------------
    // Derive the search radius from the intrinsic resolution of the cloud.
    float minDist;
    float maxDist;
    findNNdistances(cloudA, minDist, maxDist);
    cout << " Point cloud, to estimate normals: minDist, maxDist = " << minDist << ", " << maxDist << endl;
    cPar.radius = cPar.rtimes * minDist;
    cout << " Radius in use: " << cPar.radius << endl;
  }
  else
  {
    cout << " KNN in use: " << cPar.knn << endl;
  }

  cout << " Normal estimation begin.." << endl;

  // Step 1 ------------------
  // @DT: Compute the normals of A, the reference point cloud
  // Create the normal estimation class, and pass the input dataset to it
  NormalEstimationOMP<PointXYZRGBNormal, Normal> ne;
  ne.setInputCloud(cloudA.makeShared());

  // Create an empty kdtree representation, and pass it to the normal estimation object.
  // Its content will be filled inside the object, based on the given input dataset (as no other search surface is given).
  search::KdTree<PointXYZRGBNormal>::Ptr tree (new search::KdTree<PointXYZRGBNormal>());
  ne.setSearchMethod(tree);

  // // Output datasets
  // PointCloud<Normal>::Ptr cloudNormals (new PointCloud<Normal>);

  if (cPar.knn == 0) // Use all neighbors in a sphere of radius
    ne.setRadiusSearch( cPar.radius );
  else // Use a fixed number of neighbors
    ne.setKSearch( cPar.knn );

  // Set view points
  Eigen::Vector4f centroid;
  centroid.setZero();
  compute3DCentroid( cloudA, centroid );
  PointT pMin, pMax;
  getMinMax3D( cloudA, pMin, pMax );
  ne.setViewPoint( centroid[0], centroid[1], pMax.z+1.0 );
  // cout << "Centroid: " << centroid[0] << " " << centroid[1] << " " << centroid[2] << " " << centroid[3] << " size " << cloudA.size() << endl;
  // cout << "Min: " << pMin.x << " " << pMin.y << " " << pMin.z << endl;
  // cout << "Max: " << pMax.x << " " << pMax.y << " " << pMax.z << endl;

  if ( normFile != "" )
  {
    // NOTE(review): on this branch cloudNormals is written to normFile without
    // ne.compute() being invoked in this function — confirm the normals are
    // expected to be populated already on this path.
    PointCloud<PointXYZRGBNormal>::Ptr cloudWithNormals(new PointCloud<PointXYZRGBNormal>);
    copyPointCloud( cloudA, *cloudWithNormals );
    copyPointCloud( *cloudNormals, *cloudWithNormals );
    writecloud(normFile, *cloudWithNormals);
  }
  else
  {
    // Check if any nan normals
    // Re-estimate with a doubled radius/knn until no NaN normals remain.
    // NOTE(review): the radius/knn is doubled BEFORE the first compute() too,
    // so the first estimation already runs at twice the configured value —
    // confirm this is intended.
    size_t nanNormal = 0;
    do
    {
      nanNormal = 0;
      if (cPar.knn == 0)
      { // Use all neighbors in a sphere of radius
        cPar.radius *= 2;
        cout << " Radius in use: " << cPar.radius << endl;
        ne.setRadiusSearch( cPar.radius );
      }
      else
      { // Double the number of neighbors
        cPar.knn *= 2;
        cout << " KNN in use: " << cPar.knn << endl;
        ne.setKSearch( cPar.knn );
      }
      // Compute the features
      ne.compute(*cloudNormals);
      for (size_t i = 0; i < cloudA.points.size(); i++)
      {
        // cout << "Nx " << cloudNormals->at(i).normal_x << " Ny " << cloudNormals->at(i).normal_y << " Nz " << cloudNormals->at(i).normal_z << endl;
        if ( isnan(cloudNormals->at(i).normal_x) || isnan(cloudNormals->at(i).normal_y) || isnan(cloudNormals->at(i).normal_z) )
          nanNormal++;
      }
    }
    while (nanNormal > 0);
  }

  // if (nanNormal > 0)
  // {
  //   if (cPar.knn == 0)
  //     cerr << " ** Warning: nan normals found: " << nanNormal << "! Increase the radius!" << endl;
  //   else
  //     cerr << " ** Warning: nan normals found: " << nanNormal << "! Increase the knn!" << endl;
  //   // cout << "The points with nan normals would be excluded from metric calculation." << endl;
  // }

  cout << " Normal estimation on original point cloud DONE! It takes " << (clock() - t1) / CLOCKS_PER_SEC << " seconds (in CPU time)." << endl;
}

/**!
* function to compute the symmetric quality metric: Point-to-Point and Point-to-Plane
*   @param cloudA: point cloud, original version
*   @param cloudB: point cloud, decoded/reconstructed version
*   @param cPar: input parameters
*   @param qual_metric: quality metric, to be returned
* \note
*   PointT typename of point used in point cloud
* \author
*   Dong Tian, MERL
*/
template<typename PointT>
void computeGeometricQualityMetric(PointCloud<PointT> &cloudA, PointCloud<PointT> &cloudB, commandPar &cPar, qMetric &qual_metric)
{
  float minDist;
  float maxDist;
  findNNdistances(cloudA, minDist, maxDist);
  qual_metric.maxDist = maxDist;
  cout << "Minimum and maximum NN distances (intrinsic resolutions): " << minDist << ", " << maxDist << endl;

  // Check cloud size
  size_t orgSize = max( cloudA.points.size(), cloudB.points.size() );
  size_t newSize = min( cloudA.points.size(), cloudB.points.size() );
  float ratio = 1.0 * newSize / orgSize;
  cout << "Point cloud sizes for org version, dec version, and the scaling ratio: " << orgSize << ", " << newSize << ", " << ratio << endl;

  if (cPar.file2 == "" && cPar.normFile == "" ) // If no file2 & no normFile provided, return just after checking the NN
    return;

  // Estimate or import normals, only on original point cloud
  PointCloud<Normal>::Ptr cloudNormalsA (new PointCloud<Normal>);
  if (!cPar.c2c_only)
  {
    cout << endl;
    cout << "0. Preparing normals.\n";
    getNormals(cloudA, cPar.normFile, cPar, cloudNormalsA);
  }

  if (cPar.file2 == "") // If no file2 provided, return just after normal estimations.
    return;

  // Based on normals on original point cloud, derive normals on reconstructed point cloud
  vector< vector<float> > cloudNormalsB( cloudB.points.size() );
  if (!cPar.c2c_only)
    scaleNormals( cloudA, cloudNormalsA, cloudB, cloudNormalsB );
  cout << endl;

  // Use "a" as reference
  cout << "1. Use infile1 (A) as reference, loop over A, use normals on B. (A->B).\n";
  qMetric metricA;
  metricA.maxDist = maxDist;
  findMetricA( cloudA, cloudB, cPar, cloudNormalsB, metricA );
  cout << " ### A->B,ACD1,p2point," << metricA.c2c_rms << endl;
  // cout << " ### A->B,rms1PSNR,p2point," << metricA.c2c_psnr << endl;
  if (!cPar.c2c_only)
  {
    cout << " ### A->B,ACD1,p2plane," << metricA.c2p_rms << endl;
    // cout << " ### A->B,rms1PSNR,p2plane," << metricA.c2p_psnr << endl;
  }
  if ( cPar.hausdorff )
  {
    cout << " ### A->B,h1,p2point," << metricA.c2c_hausdorff << endl;
    // cout << " ### A->B,hPSNR1,p2point," << metricA.c2c_hausdorff_psnr << endl;
    if (!cPar.c2c_only)
    {
      cout << " ### A->B,h1,p2plane," << metricA.c2p_hausdorff << endl;
      // cout << " ### A->B,hPSNR1,p2plane," << metricA.c2p_hausdorff_psnr << endl;
    }
  }

  if (!cPar.singlePass)
  {
    // Use "b" as reference
    cout << "2. Use infile2 (B) as reference, loop over B, use normals on A. (B->A).\n";
    qMetric metricB;
    metricB.maxDist = maxDist;
    findMetricB( cloudA, cloudB, cPar, cloudNormalsA, metricB );
    cout << " ### B->A,ACD2,p2point," << metricB.c2c_rms << endl;
    // cout << " ### B->A,rms2PSNR,p2point," << metricB.c2c_psnr << endl;
    if (!cPar.c2c_only)
    {
      cout << " ### B->A,ACD2,p2plane," << metricB.c2p_rms << endl;
      // cout << " ### B->A,rms2PSNR,p2plane," << metricB.c2p_psnr << endl;
    }
    if ( cPar.hausdorff )
    {
      cout << " ### B->A,h2,p2point," << metricB.c2c_hausdorff << endl;
      // cout << " ### B->A,hPSNR2,p2point," << metricB.c2c_hausdorff_psnr << endl;
      if (!cPar.c2c_only)
      {
        cout << " ### B->A,h2,p2plane," << metricB.c2p_hausdorff << endl;
        // cout << " ### B->A,hPSNR2,p2plane," << metricB.c2p_hausdorff_psnr << endl;
      }
    }

    // Derive the final symmetric metric
    // qual_metric.c2c_rms = max( metricA.c2c_rms, metricB.c2c_rms );
    // qual_metric.c2p_rms = max( metricA.c2p_rms, metricB.c2p_rms );
    // qual_metric.c2c_psnr = min( metricA.c2c_psnr, metricB.c2c_psnr );
    // qual_metric.c2p_psnr = min( metricA.c2p_psnr, metricB.c2p_psnr );
    qual_metric.c2c_rms = 0.5 * ( metricA.c2c_rms + metricB.c2c_rms ); // Chamfer distance
    qual_metric.c2p_rms = 0.5 * ( metricA.c2p_rms + metricB.c2p_rms ); // Chamfer distance
    qual_metric.c2c_psnr = 10 * log10( cPar.BBsize * cPar.BBsize / (qual_metric.c2c_rms) ); // CD-PSNR
    qual_metric.c2p_psnr = 10 * log10( cPar.BBsize * cPar.BBsize / (qual_metric.c2p_rms) ); // CD-PSNR

    qual_metric.c2c_hausdorff = max( metricA.c2c_hausdorff, metricB.c2c_hausdorff );
    qual_metric.c2p_hausdorff = max( metricA.c2p_hausdorff, metricB.c2p_hausdorff );
    // qual_metric.c2c_hausdorff_psnr = min( metricA.c2c_hausdorff_psnr, metricB.c2c_hausdorff_psnr );
    // qual_metric.c2p_hausdorff_psnr = min( metricA.c2p_hausdorff_psnr, metricB.c2p_hausdorff_psnr );

    cout << "3. Final (symmetric).\n";
    cout << " ### Symmetric,CD,p2point," << qual_metric.c2c_rms << endl;
    cout << " ### Symmetric,CD-PSNR,p2point," << qual_metric.c2c_psnr << endl;
    if (!cPar.c2c_only)
    {
      cout << " ### Symmetric,CD,p2plane," << qual_metric.c2p_rms << endl;
      cout << " ### Symmetric,CD-PSNR,p2plane," << qual_metric.c2p_psnr << endl;
    }
    if ( cPar.hausdorff )
    {
      cout << " ### Symmetric,hF,p2point," << qual_metric.c2c_hausdorff << endl;
      // cout << " ### Symmetric,hPSNRF,p2point," << qual_metric.c2c_hausdorff_psnr << endl;
      if (!cPar.c2c_only)
      {
        cout << " ### Symmetric,hF,p2plane," << qual_metric.c2p_hausdorff << endl;
        // cout << " ### Symmetric,hPSNRF,p2plane," << qual_metric.c2p_hausdorff_psnr << endl;
      }
    }
  }
}

/**!
* \function
*   To compute "one-way" quality metric: Point-to-Point and
*   Point-to-Plane. Loop over each point in A. Normals in B to be used
*
*   1) For each point in A, find a corresponding point in B.
*   2) Form an error vector between the point pair.
*   3) Use the length of the error vector as point-to-point measure
*   4) Project the error vector along the normals in B, use the length
*      of the projected error vector as point-to-plane measure
*
*   @param cloudA: Reference point cloud. e.g. the original cloud, on
*     which normals would be estimated. It is the full set of point
*     cloud. Multiple points in count
*   @param cloudB: Processed point cloud. e.g.
the decoded cloud
*   @param cPar: Command line parameters
*   @param cloudNormalsB: Normals for cloudB
*   @param metric: updated quality metric, to be returned
* \note
*   PointT typename of point used in point cloud
* \author
*   Dong Tian, MERL
*/
template<typename PointT>
void findMetricA(PointCloud<PointT> &cloudA, PointCloud<PointT> &cloudB, commandPar &cPar, vector< vector<float> > &cloudNormalsB, qMetric &metric)
{
  mutex myMutex;
  // @DT: Compute the projected distance along the normal direction (cloud 2 plane)
  clock_t t2 = clock();
  float max_dist_b_c2p = -std::numeric_limits<float>::max();
  double rms_dist_b_c2p = 0;
  float max_dist_b_c2c = -std::numeric_limits<float>::max();
  double rms_dist_b_c2c = 0;
  size_t num = 0;

  search::KdTree<PointT> treeB;
  treeB.setInputCloud (cloudB.makeShared());

#pragma omp parallel for
  for (size_t i = 0; i < cloudA.points.size(); i++)
  {
    // Find the nearest neighbor in B. store it in 'j'
    vector<int> indices(1);
    vector<float> sqrDist(1);
    treeB.nearestKSearch(cloudA.points[i], 1, indices, sqrDist);
    int j = indices[0];

    // Compute the error vector
    vector<float> errVector(3);
    errVector[0] = cloudA.points[i].x - cloudB.points[j].x;
    errVector[1] = cloudA.points[i].y - cloudB.points[j].y;
    errVector[2] = cloudA.points[i].z - cloudB.points[j].z;

    // // Compute point-to-point, which should be equal to sqrt( sqrDist[0] )
    // float distProj_c2c = sqrt( errVector[0] * errVector[0] +
    //                            errVector[1] * errVector[1] +
    //                            errVector[2] * errVector[2] );

    // Compute point-to-point, which should be equal to ( sqrDist[0] )
    // @CH used to calculate the ACD
    float distProj_c2c = ( errVector[0] * errVector[0] + errVector[1] * errVector[1] + errVector[2] * errVector[2] );

    // Compute point-to-plane
    // Normals in B will be used for point-to-plane
    float distProj = 0.0;
    if (!cPar.c2c_only)
    {
      distProj = fabs( errVector[0] * cloudNormalsB[j][0] + errVector[1] * cloudNormalsB[j][1] + errVector[2] * cloudNormalsB[j][2] );
      // @CH used to calculate the ACD
      distProj = distProj * distProj;
    }

    // Serialize the accumulator updates; the loop body runs under OpenMP.
    myMutex.lock();
    num++;
    // mean square distance
    rms_dist_b_c2c += distProj_c2c;
    if (distProj_c2c > max_dist_b_c2c)
      max_dist_b_c2c = distProj_c2c;
    if (!cPar.c2c_only)
    {
      rms_dist_b_c2p += distProj;
      if (distProj > max_dist_b_c2p)
        max_dist_b_c2p = distProj;
    }
    myMutex.unlock();
  }

  // Despite the "rms" names these hold MEAN SQUARED distances (see @CH notes above).
  rms_dist_b_c2p = rms_dist_b_c2p / num;
  rms_dist_b_c2c = rms_dist_b_c2c / num;
  metric.c2p_rms = rms_dist_b_c2p;
  metric.c2c_rms = rms_dist_b_c2c;
  metric.c2p_hausdorff = max_dist_b_c2p;
  metric.c2c_hausdorff = max_dist_b_c2c;

  // from distance to PSNR. cloudA always the original
  // metric.c2c_psnr = getPSNR( cloudA, metric.c2c_rms, metric.maxDist );
  // metric.c2p_psnr = getPSNR( cloudA, metric.c2p_rms, metric.maxDist );
  // metric.c2c_hausdorff_psnr = getPSNR( cloudA, metric.c2c_hausdorff, metric.maxDist );
  // metric.c2p_hausdorff_psnr = getPSNR( cloudA, metric.c2p_hausdorff, metric.maxDist );

  clock_t t3 = clock();
  cerr << " Error computing takes " << (t3-t2)/CLOCKS_PER_SEC << " seconds (in CPU time)." << endl;
}

/**!
* \function
*   To compute "one-way" quality metric: Point-to-Point and
*   Point-to-Plane. Loop over each point in B. Normals in A to be used
*
*   1) For each point in B, find a corresponding point in A.
*   2) Form an error vector between the point pair.
*   3) Use the length of the error vector as point-to-point measure
*   4) Project the error vector along the normals in A, use the length
*      of the projected error vector as point-to-plane measure
*
*   @param cloudA: Reference point cloud. e.g. the original cloud, on
*     which normals would be estimated. It is the full set of point
*     cloud. Multiple points in count
*   @param cloudB: Processed point cloud. e.g. the decoded cloud
*   @param cPar: Command line parameters
*   @param cloudNormalsA: Normals for cloudA
*   @param metric: updated quality metric, to be returned
* \note
*   PointT typename of point used in point cloud
* \author
*   Dong Tian, MERL
*/
template<typename PointT>
void findMetricB(PointCloud<PointT> &cloudA, PointCloud<PointT> &cloudB, commandPar &cPar, PointCloud<Normal>::Ptr &cloudNormalsA, qMetric &metric)
{
  mutex myMutex;
  clock_t t2 = clock();
  float max_dist_b_c2p = -std::numeric_limits<float>::max();
  double rms_dist_b_c2p = 0;
  float max_dist_b_c2c = -std::numeric_limits<float>::max();
  double rms_dist_b_c2c = 0;
  size_t num = 0;

  search::KdTree<PointT> treeA;
  treeA.setInputCloud (cloudA.makeShared());

#pragma omp parallel for
  for (size_t i = 0; i < cloudB.points.size(); i++)
  {
    // Find the nearest neighbor in A. store it in 'j'
    vector<int> indices(1);
    vector<float> sqrDist(1);
    treeA.nearestKSearch(cloudB.points[i], 1, indices, sqrDist);
    int j = indices[0];

    // Compute the error vector
    vector<float> errVector(3);
    errVector[0] = cloudB.points[i].x - cloudA.points[j].x;
    errVector[1] = cloudB.points[i].y - cloudA.points[j].y;
    errVector[2] = cloudB.points[i].z - cloudA.points[j].z;

    // // Compute point-to-point, which should be equal to sqrt( sqrDist[0] )
    // float distProj_c2c = sqrt( errVector[0] * errVector[0] +
    //                            errVector[1] * errVector[1] +
    //                            errVector[2] * errVector[2] );

    // Compute point-to-point, which should be equal to ( sqrDist[0] )
    // @CH used to calculate the ACD
    float distProj_c2c = ( errVector[0] * errVector[0] + errVector[1] * errVector[1] + errVector[2] * errVector[2] );

    // Compute point-to-plane
    // Normals in A will be used for point-to-plane
    // NOTE(review): distProj is uninitialized here, but it is only written and
    // read under the same !cPar.c2c_only condition, so no indeterminate read occurs.
    float distProj;
    if (!cPar.c2c_only)
    {
      distProj = fabs( errVector[0] * cloudNormalsA->at(j).normal_x + errVector[1] * cloudNormalsA->at(j).normal_y + errVector[2] * cloudNormalsA->at(j).normal_z );
      // @CH used to calculate the ACD
      distProj = distProj * distProj;
    }

    // Serialize the accumulator updates; the loop body runs under OpenMP.
    myMutex.lock();
    num++;
    // mean square distance
    rms_dist_b_c2c += distProj_c2c;
    if (distProj_c2c > max_dist_b_c2c)
      max_dist_b_c2c = distProj_c2c;
    if (!cPar.c2c_only)
    {
      rms_dist_b_c2p += distProj;
      if (distProj > max_dist_b_c2p)
        max_dist_b_c2p = distProj;
    }
    myMutex.unlock();
  }

  // Despite the "rms" names these hold MEAN SQUARED distances (see @CH notes above).
  rms_dist_b_c2p = rms_dist_b_c2p / num;
  rms_dist_b_c2c = rms_dist_b_c2c / num;
  metric.c2p_rms = rms_dist_b_c2p;
  metric.c2c_rms = rms_dist_b_c2c;
  metric.c2p_hausdorff = max_dist_b_c2p;
  metric.c2c_hausdorff = max_dist_b_c2c;

  // from distance to PSNR. cloudA always the original
  // metric.c2c_psnr = getPSNR( cloudA, metric.c2c_rms, metric.maxDist );
  // metric.c2p_psnr = getPSNR( cloudA, metric.c2p_rms, metric.maxDist );
  // metric.c2c_hausdorff_psnr = getPSNR( cloudA, metric.c2c_hausdorff, metric.maxDist );
  // metric.c2p_hausdorff_psnr = getPSNR( cloudA, metric.c2p_hausdorff, metric.maxDist );

  clock_t t3 = clock();
  cerr << " Error computing takes " << (t3-t2)/CLOCKS_PER_SEC << " seconds (in CPU time)." << endl;
}

}; // end of namespace geometric_quality (stray ';' kept verbatim; harmless empty declaration)
} //~ namespace pcl

#endif
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. 
/// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXBaseSpecifierMatcher = internal::Matcher<CXXBaseSpecifier>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>; using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>; /// @} /// Matches any node. 
/// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the the statement are expanded from different /// appearances of the macro. AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. /// /// Examples matches the declaration node with \c foo and \c bar, but not /// \c number. 
/// (matcher = declStmt(has(decompositionDecl()))) /// /// \code /// int number = 42; /// auto [foo, bar] = std::make_pair{42, 42}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl> decompositionDecl; /// Matches binding declarations /// Example matches \c foo and \c bar /// (matcher = bindingDecl() /// /// \code /// auto [foo, bar] = std::make_pair{42, 42}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl> bindingDecl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. 
/// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. 
/// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches class bases. /// /// Examples matches \c public virtual B. /// \code /// class B {}; /// class C : public virtual B {}; /// \endcode extern const internal::VariadicAllOfMatcher<CXXBaseSpecifier> cxxBaseSpecifier; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template arguments (with location info). /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgumentLoc() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. 
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   NonTypeTemplateParmDecl>
    nonTypeTemplateParmDecl;

/// Matches template type parameter declarations.
///
/// Given
/// \code
///   template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
///   matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
    templateTypeParmDecl;

/// Matches template template parameter declarations.
///
/// Given
/// \code
///   template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTemplateParmDecl()
///   matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   TemplateTemplateParmDecl>
    templateTemplateParmDecl;

/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a; // fieldDecl(isPublic()) matches 'a'
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived1 : public Base {}; // matches 'Base'
///   struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_public;
}

/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a;
///   protected: int b; // fieldDecl(isProtected()) matches 'b'
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_protected;
}

/// Matches private C++ declarations and C++ base specifiers that specify
/// private inheritance.
/// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. 
/// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder) != List.end(); } /// Causes all nested matchers to be matched with the specified traversal kind. 
/// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreUnlessSpelledInSource, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename... P> class MatcherT, typename... 
P, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>> traverse(TraversalKind TK, const internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>(TK, InnerMatcher); } template <typename... T> internal::Matcher<typename internal::GetClade<T...>::Type> traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) { return traverse(TK, InnerMatcher.with()); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. 
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder);
}

/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
///   int a = 0;
///   char b = (0);
///   void* c = reinterpret_cast<char*>(0);
///   char d = char(0);
/// \endcode
/// The matcher
///    varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
///    varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder);
}

/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
///   int arr[5];
///   int a = 0;
///   char b = (0);
///   const int c = a;
///   int *d = (arr);
///   long e = ((long) 0l);
/// \endcode
/// The matchers
///    varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
///    varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
///    varDecl(hasInitializer(integerLiteral()))
///    varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder);
}

/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
///   void (*fp)(void);
/// \endcode
/// The matcher
/// \code
///   varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder);
}

/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
///   const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
///   implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  const Expr *E = Node.IgnoreParens();
  return InnerMatcher.matches(*E, Finder, Builder);
}

/// Matches expressions that are instantiation-dependent even if they are
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}

/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }

/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. 
/// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that refers to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return toString(Node.getAsIntegral(), 10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. 
/// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. 
/// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. 
/// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. 
/// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. 
/// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using-enum declarations. 
/// /// Given /// \code /// namespace X { enum x {...}; } /// using enum X::x; /// \endcode /// usingEnumDecl() /// matches \code using enum X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingEnumDecl> usingEnumDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. /// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. 
/// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. 
/// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. 
/// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode /// See also the binaryOperation() matcher for more-general matching of binary /// uses of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches rewritten binary operators /// /// Example matches use of "<": /// \code /// #include <compare> /// struct HasSpaceshipMem { /// int a; /// constexpr auto operator<=>(const HasSpaceshipMem&) const = default; /// }; /// void compare() { /// HasSpaceshipMem hs1, hs2; /// if (hs1 < hs2) /// return; /// } /// \endcode /// See also the binaryOperation() matcher for more-general matching /// of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXRewrittenBinaryOperator> cxxRewrittenBinaryOperator; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. 
/// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. /// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. 
/// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches co_return statements. /// /// Given /// \code /// while (true) { co_return; } /// \endcode /// coreturnStmt() /// matches 'co_return' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoreturnStmt> coreturnStmt; /// Matches return statements. 
/// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. 
/// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. 
/// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. /// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches co_await expressions. /// /// Given /// \code /// co_await 1; /// \endcode /// coawaitExpr() /// matches 'co_await 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr> coawaitExpr; /// Matches co_await expressions where the type of the promise is dependent extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr> dependentCoawaitExpr; /// Matches co_yield expressions. 
/// /// Given /// \code /// co_yield 1; /// \endcode /// coyieldExpr() /// matches 'co_yield 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr> coyieldExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches C11 _Generic expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr> genericSelectionExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode /// See also the binaryOperation() matcher for more-general matching. extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). 
/// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. 
/// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. 
extern const internal::VariadicAllOfMatcher<Type> type;

/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;

/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
///   class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
///                        has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    eachOf;

/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    anyOf;

/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    allOf;

/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not be present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
///   class Foo {
///     int bar;
///   }
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(
///     optionally(has(
///       fieldDecl(hasName("bar")).bind("var")
///   ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches any of the \p NodeMatchers with InnerMatchers nested within /// /// Given /// \code /// if (true); /// for (; true; ); /// \endcode /// with the matcher /// \code /// mapAnyOf(ifStmt, forStmt).with( /// hasCondition(cxxBoolLiteralExpr(equals(true))) /// ).bind("trueCond") /// \endcode /// matches the \c if and the \c for. It is equivalent to: /// \code /// auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true))); /// anyOf( /// ifStmt(trueCond).bind("trueCond"), /// forStmt(trueCond).bind("trueCond") /// ); /// \endcode /// /// The with() chain-call accepts zero or more matchers which are combined /// as-if with allOf() in each of the node matchers. /// Usable as: Any Matcher template <typename T, typename... U> auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) { return internal::MapAnyOfHelper<U...>(); } /// Matches nodes which can be used with binary operators. 
/// /// The code /// \code /// var1 != var2; /// \endcode /// might be represented in the clang AST as a binaryOperator, a /// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on /// /// * whether the types of var1 and var2 are fundamental (binaryOperator) or at /// least one is a class type (cxxOperatorCallExpr) /// * whether the code appears in a template declaration, if at least one of the /// vars is a dependent-type (binaryOperator) /// * whether the code relies on a rewritten binary operator, such as a /// spaceship operator or an inverted equality operator /// (cxxRewrittenBinaryOperator) /// /// This matcher elides details in places where the matchers for the nodes are /// compatible. /// /// Given /// \code /// binaryOperation( /// hasOperatorName("!="), /// hasLHS(expr().bind("lhs")), /// hasRHS(expr().bind("rhs")) /// ) /// \endcode /// matches each use of "!=" in: /// \code /// struct S{ /// bool operator!=(const S&) const; /// }; /// /// void foo() /// { /// 1 != 2; /// S() != S(); /// } /// /// template<typename T> /// void templ() /// { /// 1 != 2; /// T() != S(); /// } /// struct HasOpEq /// { /// bool operator==(const HasOpEq &) const; /// }; /// /// void inverse() /// { /// HasOpEq s1; /// HasOpEq s2; /// if (s1 != s2) /// return; /// } /// /// struct HasSpaceship /// { /// bool operator<=>(const HasOpEq &) const; /// }; /// /// void use_spaceship() /// { /// HasSpaceship s1; /// HasSpaceship s2; /// if (s1 != s2) /// return; /// } /// \endcode extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator> binaryOperation; /// Matches function calls and constructor calls /// /// Because CallExpr and CXXConstructExpr do not share a common /// base class with API accessing arguments etc, AST Matchers for code /// which should match both are typically duplicated. This matcher /// removes the need for duplication. 
/// /// Given code /// \code /// struct ConstructorTakesInt /// { /// ConstructorTakesInt(int i) {} /// }; /// /// void callTakesInt(int i) /// { /// } /// /// void doCall() /// { /// callTakesInt(42); /// } /// /// void doConstruct() /// { /// ConstructorTakesInt cti(42); /// } /// \endcode /// /// The matcher /// \code /// invocation(hasArgument(0, integerLiteral(equals(42)))) /// \endcode /// matches the expression in both doCall and doConstruct extern const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::BindableMatcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(
      unaryExprOrTypeTraitExpr(allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///     hasAnyName(a, b, c)
/// \endcode
///  is equivalent to, but faster than
/// \code
///     anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>,
                                        StringRef, internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  // The leading "::" makes anchored patterns like "::X" work for names in the
  // global namespace.
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullNameString);
}

/// Matches overloaded operator names.
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcher< internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcher< internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>>({std::string(Name)}); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// hasAnyOverloadedOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcher<internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>>, StringRef, internal::hasAnyOverloadedOperatorNameFunc> hasAnyOverloadedOperatorName; /// Matches template-dependent, but known, member names. /// /// In template declarations, dependent members are not resolved and so can /// not be matched to particular named declarations. /// /// This matcher allows to match on the known name of members. 
/// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()` AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) { return Node.getMember().getAsString() == N; } /// Matches template-dependent, but known, member names against an already-bound /// node /// /// In template declarations, dependent members are not resolved and so can /// not be matched to particular named declarations. /// /// This matcher allows to match on the name of already-bound VarDecl, FieldDecl /// and CXXMethodDecl nodes. /// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// The matcher /// @code /// \c cxxDependentScopeMemberExpr( /// hasObjectExpression(declRefExpr(hasType(templateSpecializationType( /// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has( /// cxxMethodDecl(hasName("mem")).bind("templMem") /// ))))) /// )))), /// memberHasSameNameAsBoundNode("templMem") /// ) /// @endcode /// first matches and binds the @c mem member of the @c S template, then /// compares its name to the usage in @c s.mem() in the @c x function template AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode, std::string, BindingID) { auto MemberName = Node.getMember().getAsString(); return Builder->removeBindings( [this, MemberName](const BoundNodesMap &Nodes) { const auto &BN = Nodes.getNode(this->BindingID); if (const auto *ND = BN.get<NamedDecl>()) { if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND)) return true; return ND->getName() != MemberName; } return true; }); } /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. 
/// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. 
/// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match anything.
  if (BaseName.empty())
    return false;

  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));

  // C++ record, or else it must be an Objective-C interface.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*RD, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}

/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // A C++ struct/union/class: walk its direct bases only.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);

  // Otherwise the node must be an Objective-C class.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/true);
}

/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;
  const auto M = isDirectlyDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Match into a scratch builder first so bindings are committed only when
  // the found method is actually accepted below.
  BoundNodesTreeBuilder Result(*Builder);
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                            Node.method_end(), Finder,
                                            &Result);
  if (MatchIt == Node.method_end())
    return false;

  // In AsIs-less traversal modes, implicit methods are not visible.
  if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit())
    return false;
  *Builder = std::move(Result);
  return true;
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
/// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. 
/// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. 
/// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcher< internal::HasDeclarationMatcher, void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcher< internal::HasDeclarationMatcher, void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>( InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) { std::string SelectorString = Node.getSelector().getAsString(); return RegExp->match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// and public virtual X (matcher = cxxBaseSpecifier(hasType( /// asString("class X"))) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// class Z : public virtual X {}; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// and public virtual X (matcher = cxxBaseSpecifier(hasType( /// cxxRecordDecl(hasName("X")))) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// class Z : public virtual X {}; /// \endcode /// /// Example matches class Derived /// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base")))))) /// \code /// class Base {}; /// class Derived : Base {}; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>, /// Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of a node matches the inner matcher. 
/// /// Examples: /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x /// /// \code /// auto x = int(3); /// \code /// cxxTemporaryObjectExpr(hasTypeLoc(loc(asString("int")))) /// matches int(3) /// /// \code /// struct Foo { Foo(int, int); }; /// auto x = Foo(1, 2); /// \code /// cxxFunctionalCastExpr(hasTypeLoc(loc(asString("struct Foo")))) /// matches Foo(1, 2) /// /// Usable as: Matcher<BlockDecl>, Matcher<CXXBaseSpecifier>, /// Matcher<CXXCtorInitializer>, Matcher<CXXFunctionalCastExpr>, /// Matcher<CXXNewExpr>, Matcher<CXXTemporaryObjectExpr>, /// Matcher<CXXUnresolvedConstructExpr>, /// Matcher<ClassTemplateSpecializationDecl>, Matcher<CompoundLiteralExpr>, /// Matcher<DeclaratorDecl>, Matcher<ExplicitCastExpr>, /// Matcher<ObjCPropertyDecl>, Matcher<TemplateArgumentLoc>, /// Matcher<TypedefNameDecl> AST_POLYMORPHIC_MATCHER_P( hasTypeLoc, AST_POLYMORPHIC_SUPPORTED_TYPES( BlockDecl, CXXBaseSpecifier, CXXCtorInitializer, CXXFunctionalCastExpr, CXXNewExpr, CXXTemporaryObjectExpr, CXXUnresolvedConstructExpr, ClassTemplateSpecializationDecl, CompoundLiteralExpr, DeclaratorDecl, ExplicitCastExpr, ObjCPropertyDecl, TemplateArgumentLoc, TypedefNameDecl), internal::Matcher<TypeLoc>, Inner) { TypeSourceInfo *source = internal::GetTypeSourceInfo(Node); if (source == nullptr) { // This happens for example for implicit destructors. return false; } return Inner.matches(source->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null QualTypes have no canonical type; never match them.
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accepts either the type itself or a pointer to it, so both `x.m()`
  // and `p->m()` forms are covered.
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a using-declaration have a
  // UsingShadowDecl as their found declaration.
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder) != Node.decls_end(); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N) { unsigned NumArgs = Node.getNumArgs(); if (!Finder->isTraversalIgnoringImplicitNodes()) return NumArgs == N; while (NumArgs) { if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1))) break; --NumArgs; } return NumArgs == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { if (N >= Node.getNumArgs()) return false; const Expr *Arg = Node.getArg(N); if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) return false; return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. 
/// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N; } /// Matches the n'th declaration of a declaration statement. /// /// Note that this does not work for global declarations because the AST /// breaks up multiple-declaration DeclStmt's into multiple single-declaration /// DeclStmt's. /// Example: Given non-global declarations /// \code /// int a, b = 0; /// int c; /// int d = 2, e; /// \endcode /// declStmt(containsDeclaration( /// 0, varDecl(hasInitializer(anything())))) /// matches only 'int d = 2, e;', and /// declStmt(containsDeclaration(1, varDecl())) /// \code /// matches 'int a, b = 0' as well as 'int d = 2, e;' /// but 'int c;' is not matched. /// \endcode AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, internal::Matcher<Decl>, InnerMatcher) { const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); if (N >= NumDecls) return false; DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); std::advance(Iterator, N); return InnerMatcher.matches(**Iterator, Finder, Builder); } /// Matches a C++ catch statement that has a catch-all handler. /// /// Given /// \code /// try { /// // ... /// } catch (int) { /// // ... /// } catch (...) { /// // ... /// } /// \endcode /// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int). AST_MATCHER(CXXCatchStmt, isCatchAll) { return Node.getExceptionDecl() == nullptr; } /// Matches a constructor initializer. 
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); if (MatchIt == Node.init_end()) return false; return (*MatchIt)->isWritten() || !Finder->isTraversalIgnoringImplicitNodes(); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) break; BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. /// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. 
///
/// Given
/// \code
/// void foo() {
///   struct point { double x; double y; };
///   point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}

/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
///   void (*f_ptr)(int) = f;
///   f_ptr(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParamType(
///     declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
///   matches f(y) and f_ptr(y)
/// with declRefExpr(...)
///   matching int y
/// and qualType(...)
///   matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  const FunctionProtoType *FProto = nullptr;
  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();
      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();
      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be member-pointer if its a memberfunctionpointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }
  int ParamIndex = 0;
  bool Matched = false;
  unsigned NumArgs = Node.getNumArgs();
  // Variadic arguments past the declared parameters have no parameter
  // type to match against; clamp to the prototype's parameter count.
  if (FProto && FProto->isVariadic())
    NumArgs = std::min(NumArgs, FProto->getNumParams());
  for (; ArgIndex < NumArgs; ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
  if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  return false;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
///   class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
///   matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder) != Node.param_end(); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. /// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. 
/// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. 
AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); }

/// Matches weak function declarations.
///
/// Given:
/// \code
///   void foo() __attribute__((__weakref__("__foo")));
///   void bar();
/// \endcode
/// functionDecl(isWeak())
///   matches the weak declaration "foo", but not "bar".
AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); }

/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
///   functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  // K&R-style functions have no prototype, hence no exception specification.
  return false;
}

/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);

  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;

  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;

  return FnTy->isNothrow();
}

/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}

/// Matches selection statements with initializer.
///
/// Given:
/// \code
///  void foo() {
///   if (int i = foobar(); i > 0) {}
///   switch (int i = foobar(); i) {}
///   for (auto& a = get_range(); auto& x : a) {}
///  }
///  void bar() {
///   if (foobar() > 0) {}
///   switch (foobar()) {}
///   for (auto& x : get_range()) {}
///  }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
///   matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
///   matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
///   matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *Init = Node.getInit();
  return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}

/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
/// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. 
/// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. 
/// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. Note that in case of functions /// this matcher only matches the definition itself and not the other /// declarations of the same function. /// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' /// /// Given /// \code /// void f(); /// void f() {} /// \endcode /// hasBody(functionDecl()) /// matches 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void f();' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node)) return false; const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches a function declaration that has a given body present in the AST. /// Note that this matcher matches all the declarations of a function whose /// body is present in the AST. 
/// /// Given /// \code /// void f(); /// void f() {} /// void g(); /// \endcode /// functionDecl(hasAnyBody(compoundStmt())) /// matches both 'void f();' /// and 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void g();' AST_MATCHER_P(FunctionDecl, hasAnyBody, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = Node.getBody(); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. /// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder) != CS->body_end(); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. 
/// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcher<internal::ValueEqualsMatcher, void(internal::AllNodeBaseTypes), ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcher<internal::ValueEqualsMatcher, void(internal::AllNodeBaseTypes), ValueT>( Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). 
/// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P( hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator), std::string, Name) { if (Optional<StringRef> OpName = internal::getOpName(Node)) return *OpName == Name; return false; } /// Matches operator expressions (binary or unary) that have any of the /// specified names. /// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcher<internal::HasAnyOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator), std::vector<std::string>>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isAssignmentOp(); } /// Matches comparison operators. 
/// /// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 < s2 /// (matcher = cxxOperatorCallExpr(isComparisonOperator())) /// \code /// struct S { bool operator<(const S& other); }; /// void x(S s1, S s2) { bool b1 = s1 < s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isComparisonOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isComparisonOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = internal::getLHS(Node); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = internal::getRHS(Node); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. 
AST_POLYMORPHIC_MATCHER_P(
    hasEitherOperand,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // Delegates to hasLHS/hasRHS via anyOf so operand extraction stays in one
  // place for all three supported node kinds.
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1),
///                                              integerLiteral(equals(2)))
/// \code
///   1 + 2 // Match
///   2 + 1 // Match
///   1 + 1 // No match
///   2 + 2 // No match
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
    hasOperands,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
  // Tries both orientations: (LHS=Matcher1, RHS=Matcher2) and the swap.
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
                   allOf(hasLHS(Matcher2), hasRHS(Matcher1))))
      .matches(Node, Finder, Builder);
}

/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator,
                                                          CXXOperatorCallExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Operand = internal::getSubExpr(Node);
  return (Operand != nullptr &&
          InnerMatcher.matches(*Operand, Finder, Builder));
}

/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
/// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches TagDecl object that are spelled with "struct." /// /// Example matches S, but not C, U or E. 
/// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); } /// Matches TagDecl object that are spelled with "union." /// /// Example matches U, but not C, S or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); } /// Matches TagDecl object that are spelled with "class." /// /// Example matches C, but not S, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isClass) { return Node.isClass(); } /// Matches TagDecl object that are spelled with "enum." /// /// Example matches E, but not C, S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. 
/// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { ASTChildrenNotSpelledInSourceScope RAII(Finder, false); const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. 
/// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches declarations of virtual methods and C++ base specifers that specify /// virtual inheritance. /// /// Example: /// \code /// class A { /// public: /// virtual void x(); // matches x /// }; /// \endcode /// /// Example: /// \code /// class Base {}; /// class DirectlyDerived : virtual Base {}; // matches Base /// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base /// \endcode /// /// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER(isVirtual, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl, CXXBaseSpecifier)) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. 
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}

/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Either the method actually overrides something, or it carries an
  // `override` attribute (which covers dependent/uninstantiated contexts).
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}

/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}

/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  return Node.isArrow();
}

/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
    return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
/// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". 
AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). AST_MATCHER_P(MemberExpr, member, internal::Matcher<ValueDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder); } /// Matches a member expression where the object expression is matched by a /// given matcher. Implicit object expressions are included; that is, it matches /// use of implicit `this`. 
/// /// Given /// \code /// struct X { /// int m; /// int f(X x) { x.m; return m; } /// }; /// \endcode /// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m`, but not `m`; however, /// memberExpr(hasObjectExpression(hasType(pointsTo( // cxxRecordDecl(hasName("X")))))) /// matches `m` (aka. `this->m`), but not `x.m`. AST_POLYMORPHIC_MATCHER_P( hasObjectExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr), internal::Matcher<Expr>, InnerMatcher) { if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; return InnerMatcher.matches(*Node.getBase(), Finder, Builder); } /// Matches any using shadow declaration. /// /// Given /// \code /// namespace X { void b(); } /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasName("b")))) /// matches \code using X::b \endcode AST_MATCHER_P(BaseUsingDecl, hasAnyUsingShadowDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(), Node.shadow_end(), Finder, Builder) != Node.shadow_end(); } /// Matches a using shadow declaration where the target declaration is /// matched by the given matcher. /// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. 
/// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. 
/// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. 
/// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. 
/// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. 
/// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. 
/// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". 
extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. 
/// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches types nodes representing unary type transformations. 
/// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. 
/// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. 
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whoes decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. 
/// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { if (Finder->isTraversalIgnoringImplicitNodes() && !I->isWritten()) continue; BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. 
/// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; ASTChildrenNotSpelledInSourceScope RAII(Finder, false); return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. 
if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) { return anyOf( gnuNullExpr(), cxxNullPtrLiteralExpr(), integerLiteral(equals(0), hasParent(expr(hasType(pointerType()))))); } /// Matches the DecompositionDecl the binding belongs to. /// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// bindingDecl(hasName("f"), /// forDecomposition(decompositionDecl()) /// \endcode /// matches 'f' in 'auto &[f, s, t]'. 
AST_MATCHER_P(BindingDecl, forDecomposition, internal::Matcher<ValueDecl>, InnerMatcher) { if (const ValueDecl *VD = Node.getDecomposedDecl()) return InnerMatcher.matches(*VD, Finder, Builder); return false; } /// Matches the Nth binding of a DecompositionDecl. /// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// decompositionDecl(hasBinding(0, /// bindingDecl(hasName("f").bind("fBinding")))) /// \endcode /// matches the decomposition decl with 'f' bound to "fBinding". AST_MATCHER_P2(DecompositionDecl, hasBinding, unsigned, N, internal::Matcher<BindingDecl>, InnerMatcher) { if (Node.bindings().size() <= N) return false; return InnerMatcher.matches(*Node.bindings()[N], Finder, Builder); } /// Matches any binding of a DecompositionDecl. /// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// decompositionDecl(hasAnyBinding(bindingDecl(hasName("f").bind("fBinding")))) /// \endcode /// matches the decomposition decl with 'f' bound to "fBinding". AST_MATCHER_P(DecompositionDecl, hasAnyBinding, internal::Matcher<BindingDecl>, InnerMatcher) { return llvm::any_of(Node.bindings(), [&](const auto *Binding) { return InnerMatcher.matches(*Binding, Finder, Builder); }); } /// Matches declaration of the function the statement belongs to. /// /// Deprecated. Use forCallable() to correctly handle the situation when /// the declaration is not a function (but a block or an Objective-C method). /// forFunction() not only fails to take non-functions into account but also /// may match the wrong declaration in their presence. 
/// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does not match 'return v > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while (!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for (const auto &Parent : Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches declaration of the function, method, or block the statement /// belongs to. /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forCallable(functionDecl(hasName("operator=")))) /// matches 'return *this' /// but does not match 'return v > 0' /// /// Given: /// \code /// -(void) foo { /// int x = 1; /// dispatch_sync(queue, ^{ int y = 2; }); /// } /// \endcode /// declStmt(forCallable(objcMethodDecl())) /// matches 'int x = 1' /// but does not match 'int y = 2'. /// whereas declStmt(forCallable(blockDecl())) /// matches 'int y = 2' /// but does not match 'int x = 1'. 
AST_MATCHER_P(Stmt, forCallable, internal::Matcher<Decl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while (!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else if (const auto *ObjCMethodDeclNode = CurNode.get<ObjCMethodDecl>()) { if (InnerMatcher.matches(*ObjCMethodDeclNode, Finder, Builder)) { return true; } } else if (const auto *BlockDeclNode = CurNode.get<BlockDecl>()) { if (InnerMatcher.matches(*BlockDeclNode, Finder, Builder)) { return true; } } else { for (const auto &Parent : Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches a declaration that has external formal linkage. /// /// Example matches only z (matcher = varDecl(hasExternalFormalLinkage())) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode /// /// Example matches f() because it has external formal linkage despite being /// unique to the translation unit as though it has internal likage /// (matcher = functionDecl(hasExternalFormalLinkage())) /// /// \code /// namespace { /// void f() {} /// } /// \endcode AST_MATCHER(NamedDecl, hasExternalFormalLinkage) { return Node.hasExternalFormalLinkage(); } /// Matches a declaration that has default arguments. /// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode /// /// Deprecated. Use hasInitializer() instead to be able to /// match on the contents of the default argument. 
For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. 
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. /// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). 
AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. 
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder) != Clauses.end(); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and /// ``default(firstprivate)`` extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. 
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind /// specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isFirstPrivateKind())`` matches only /// ``default(firstprivate)``. AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. 
e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/policy.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/registry.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. 
*/ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. 
% % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->matte == MagickFalse) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringNotFalse(option) == MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); if (gamma != 0.0 && gamma != 1.0) { SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma); SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying 
layer opacity %.20g", (double) opacity); if (opacity == QuantumRange) return(MagickTrue); if (image->matte != MagickTrue) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity))); else if (opacity > 0) SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/ (MagickRealType) opacity))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; MagickPixelPacket color; ssize_t y; if (image->matte == MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->matte=MagickTrue; GetMagickPixelPacket(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color); status=CompositeImage(complete_mask,OverCompositeOp,mask, mask->page.x-image->page.x,mask->page.y-image->page.y); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) 
#endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register PixelPacket *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha))); else if (intensity > 0) SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange)); q++; p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define 
CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static StringInfo 
*ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { char value[MaxTextExtent]; unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->x_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->x_resolution); (void) SetImageProperty(image,"tiff:XResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->y_resolution=(double) resolution; (void) FormatLocaleString(value,MaxTextExtent,"%g", image->y_resolution); (void) SetImageProperty(image,"tiff:YResolution",value); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) *has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char 
*) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel, 
PixelPacket *q,IndexPacket *indexes,ssize_t x)
{
  if (image->storage_class == PseudoClass)
    {
      PixelPacket
        *color;

      /* type 0 carries the palette index itself. */
      if (type == 0)
        {
          if (packet_size == 1)
            SetPixelIndex(indexes+x,ScaleQuantumToChar(pixel));
          else
            SetPixelIndex(indexes+x,ScaleQuantumToShort(pixel));
        }
      color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
        (ssize_t) GetPixelIndex(indexes+x));
      /* With more than one channel, a later channel supplies alpha. */
      if ((type == 0) && (channels > 1))
        return;
      else
        SetPixelAlpha(color,pixel);
      SetPixelRGBO(q,color);
      return;
    }
  switch (type)
  {
    case -1:
    {
      /* Transparency channel. */
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      /* Red; grayscale (or a -2 mask channel) replicates it to G and B. */
      SetPixelRed(q,pixel);
      if ((channels < 3) || (type == -2))
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(q,pixel);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      /* Fourth channel: black for CMYK, otherwise alpha (if any). */
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      /* Fifth channel: alpha, except for RGB images that already have 4+
         color channels. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}

/*
  Convert one row of raw channel samples (`pixels') into the authentic
  pixel cache of `image'.  Samples are 1, 2, or 4 bytes wide per
  GetPSDPacketSize; 4-byte samples are big-endian floats.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  unsigned short
    nibble;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;  /* NOTE: intentionally shadows the outer `nibble' */

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
        q++;
      }
    else
      {
        /* 1-bit data: each byte expands to up to 8 pixels, inverted
           (set bit -> black). */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        /* x was advanced one past the last expanded pixel; compensate for
           the loop's own x++ unless the row is complete. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}

/*
  Read an uncompressed channel: one blob read per row, decoded via
  ReadPSDChannelPixels.  Returns MagickFalse on short reads.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

/*
  Read the per-row compressed-byte-count table that precedes RLE channel
  data (16-bit entries for PSD, 32-bit for PSB).  Returns NULL on
  allocation failure; caller frees.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if(sizes != (MagickOffsetType *) NULL)
    {
      for (y=0; y < (ssize_t) size; y++)
      {
        if (psd_info->version == 1)
          sizes[y]=(MagickOffsetType) ReadBlobShort(image);
        else
          sizes[y]=(MagickOffsetType) ReadBlobLong(image);
      }
    }
  return sizes;
}

static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType
*sizes,ExceptionInfo *exception)
{
  /*
    Read an RLE (PackBits) compressed channel.  `sizes' holds the
    compressed byte count of each row (from ReadPSDRLESizes); each row is
    read, expanded with DecodePSDPixels, and stored via
    ReadPSDChannelPixels.
  */
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the scratch buffer by the largest compressed row. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* Sanity bound: a compressed row should never exceed the row size by
     much (2048 is an arbitrary slack). */
  if (length > (row_size+2048))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* For 1-bit images an out-of-range depth (123456) is passed on purpose
       so DecodePSDPixels takes its default byte-copy branch instead of
       expanding bits (rows are already bit-packed). */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a ZIP (deflate) compressed channel: inflate `compact_size' bytes
  into a full channel buffer, undo the horizontal delta prediction when
  compression is ZipWithPrediction, then store row by row.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* The compressed payload cannot be larger than the file itself. */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /* Undo per-row left-to-right delta encoding; 16-bit samples carry
         the delta across the two bytes (big-endian). */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif

/*
  Read one channel of a layer, dispatching on the compression type.
  Mask channels (type < -1) are decoded into a separate mask image that is
  attached to layer_info->mask.image on success.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values.
*/
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel data (size includes the 2-byte compression
             field already consumed by the caller). */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          mask->matte=MagickFalse;
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
      {
        MagickOffsetType
          *sizes;

        sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
        if (sizes == (MagickOffsetType *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        status=ReadPSDChannelRLE(channel_image,psd_info,
          (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
        sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
      }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always reposition to the end of this channel's data, regardless of how
     much the decoder consumed. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}

/*
  Read all channels of one layer into layer_info->image: set up colorspace
  and compose mode, decode each channel, apply the layer opacity, and
  composite/preserve the opacity mask as requested.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    (void) SetImageColorspace(layer_info->image,CMYKColorspace);
  else
    if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
        (psd_info->mode == GrayscaleMode))
      (void) SetImageColorspace(layer_info->image,GRAYColorspace);
  /* Set up some hidden attributes for folks that need them.
*/
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression marker. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  /* PSD stores CMYK inverted. */
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}

/*
  Verify that a layer's channel list covers every color channel the color
  mode requires (min_channels), optionally plus one alpha channel.
  Mask channels (type < -1) are ignored.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  /* Start with the set of required channels; clear bits as they appear. */
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    if (type == 0)
      channel_type&=~RedChannel;
    else if (type == 1)
      channel_type&=~GreenChannel;
    else if (type == 2)
      channel_type&=~BlueChannel;
    else if (type == 3)
      channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
    return(MagickTrue);
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  Read the layer and mask information section: layer records, per-layer
  channel data, masks, and additional info blocks.  When skip_layers is
  set, only the alpha-channel hint (negative layer count) is consumed.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /* Skip layers & masks.
*/ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,(size_t) count); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) 
ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
            image->filename);
        }
      /*
        Blend key (4 chars, stored byte-reversed on disk) followed by the
        per-layer opacity, clipping and flags bytes.
      */
      count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
      if (count != 4)
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
            image->filename);
        }
      ReversePSDString(image,layer_info[i].blendkey,4);
      layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
        ReadBlobByte(image));
      layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
      layer_info[i].flags=(unsigned char) ReadBlobByte(image);
      /* flags bit 0x02 set means the layer is hidden */
      layer_info[i].visible=!(layer_info[i].flags & 0x02);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
          layer_info[i].blendkey,(double) layer_info[i].opacity,
          layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
          layer_info[i].visible ? "true" : "false");
      (void) ReadBlobByte(image);  /* filler */
      /*
        "Extra data" section: layer mask info, blending ranges, layer name
        and any additional (8BIM keyed) information.
      */
      size=ReadBlobLong(image);
      if (size != 0)
        {
          MagickSizeType
            combined_length,  /* bytes of the extra-data section consumed */
            length;

          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " layer contains additional info");
          length=ReadBlobLong(image);
          combined_length=length+4;  /* +4 for the length field itself */
          if (length != 0)
            {
              /*
                Layer mask info.
              */
              layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
              layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
              layer_info[i].mask.page.height=(size_t) (
                ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
              layer_info[i].mask.page.width=(size_t) (
                ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
              layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                image);
              layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
              /* mask flag 0x01: position is already relative to the layer */
              if (!(layer_info[i].mask.flags & 0x01))
                {
                  layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                    layer_info[i].page.y;
                  layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                    layer_info[i].page.x;
                }
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                  (double) layer_info[i].mask.page.x,(double)
                  layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                  (double) layer_info[i].mask.page.height,(double)
                  ((MagickOffsetType) length)-18);
              /*
                Skip over the rest of the layer mask information.
              */
              /* 18 = size of the fixed fields read above -- TODO confirm
                 against the Adobe PSD spec */
              if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                {
                  layer_info=DestroyLayerInfo(layer_info,number_layers);
                  ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
                    image->filename);
                }
            }
          length=ReadBlobLong(image);
          combined_length+=length+4;
          if (length != 0)
            {
              /*
                Layer blending ranges info (unused; payload is skipped).
              */
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  " layer blending ranges: length=%.20g",(double)
                  ((MagickOffsetType) length));
              if (DiscardBlobBytes(image,length) == MagickFalse)
                {
                  layer_info=DestroyLayerInfo(layer_info,number_layers);
                  ThrowBinaryException(CorruptImageError,
                    "UnexpectedEndOfFile",image->filename);
                }
            }
          /*
            Layer name (Pascal string: length byte then characters).
          */
          length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
          combined_length+=length+1;
          if (length > 0)
            (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
          layer_info[i].name[length]='\0';
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " layer name: %s",layer_info[i].name);
          /* the name is padded on disk to a multiple of 4 bytes */
          if ((length % 4) != 0)
            {
              length=4-(length % 4);
              combined_length+=length;
              /* Skip over the padding of the layer name */
              if (DiscardBlobBytes(image,length) == MagickFalse)
                {
                  layer_info=DestroyLayerInfo(layer_info,number_layers);
                  ThrowBinaryException(CorruptImageError,
                    "UnexpectedEndOfFile",image->filename);
                }
            }
          /*
            Whatever remains of the extra-data section is the keyed
            "additional information"; keep it verbatim as a profile.
          */
          length=(MagickSizeType) size-combined_length;
          if (length > 0)
            {
              unsigned char
                *info;

              if (length > GetBlobSize(image))
                {
                  layer_info=DestroyLayerInfo(layer_info,number_layers);
                  ThrowBinaryException(CorruptImageError,
                    "InsufficientImageDataInFile",image->filename);
                }
              layer_info[i].info=AcquireStringInfo((const size_t) length);
              info=GetStringInfoDatum(layer_info[i].info);
              (void) ReadBlob(image,(const size_t) length,info);
            }
        }
    }
    for (i=0; i < number_layers; i++)
    {
      /* zero-area layers carry no pixel data */
      if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " layer data is empty");
          if (layer_info[i].info != (StringInfo *) NULL)
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          continue;
        }
      /*
        Allocate layered image.
      */
      layer_info[i].image=CloneImage(image,layer_info[i].page.width,
        layer_info[i].page.height,MagickFalse,exception);
      if (layer_info[i].image == (Image *) NULL)
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of image for layer %.20g failed",(double) i);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      if (layer_info[i].info != (StringInfo *) NULL)
        {
          (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
            layer_info[i].info);
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        }
    }
    if (image_info->ping == MagickFalse)
      {
        for (i=0; i < number_layers; i++)
        {
          if (layer_info[i].image == (Image *) NULL)
            {
              /* no image was created for this layer: skip its channel data */
              for (j=0; j < (ssize_t) layer_info[i].channels; j++)
              {
                if (DiscardBlobBytes(image,(MagickSizeType)
                    layer_info[i].channel_info[j].size) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
              continue;
            }
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " reading data for layer %.20g",(double) i);
          status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
            exception);
          if (status == MagickFalse)
            break;
          status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
            (MagickSizeType) number_layers);
          if (status == MagickFalse)
            break;
        }
      }
    if (status != MagickFalse)
      {
        /* compact the array: drop entries whose image was never created */
        for (i=0; i < number_layers; i++)
        {
          if (layer_info[i].image == (Image *) NULL)
            {
              for (j=i; j < number_layers - 1; j++)
                layer_info[j] = layer_info[j+1];
              number_layers--;
              i--;  /* re-examine the slot we just filled */
            }
        }
        if (number_layers > 0)
          {
            /* link the layers into the image list behind the base image */
            for (i=0; i < number_layers; i++)
            {
              if (i > 0)
                layer_info[i].image->previous=layer_info[i-1].image;
              if (i < (number_layers-1))
                layer_info[i].image->next=layer_info[i+1].image;
              layer_info[i].image->page=layer_info[i].page;
            }
            image->next=layer_info[0].image;
            layer_info[0].image->previous=image;
          }
        /* the layer images are now owned by the list; free the array only */
        layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      }
    else
      layer_info=DestroyLayerInfo(layer_info,number_layers);
  }
  return(status);
}

/*
  Public wrapper for ReadPSDLayersInternal(): enforces the coder security
  policy before any layer parsing takes place.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
  rights=ReadPolicyRights;
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}

/*
  Read the flattened composite stored at the end of the PSD into 'image'.
  Only Raw and RLE compression are handled here; any other compression is
  reported as a warning and MagickFalse is returned.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by a per-row compressed-size table */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* a two-channel image is gray+alpha; channel 1 is alpha (type -1) */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* CMYK samples are stored inverted in PSD files */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}

/*
  ReadPSDImage(): decode a PSD (or PSB, version 2) file into an image list.
  Reads the header, colormap, image resources, layer/mask block and the
  precombined (merged) layer, in that order.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* "8BPS" magic; version 1 = PSD, version 2 = PSB */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* version-1 (PSD) files are limited to 30000x30000 pixels */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  /* minimum channel count the mode requires; validated below */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            /* grayscale ramp colormap; 16-bit depth gets 65536 entries */
            status=AcquireImageColormap(image,(size_t) (psd_info.depth != 16 ?
              256 : 65536));
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace);
      }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data;  the format of this data is undocumented.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap: all reds, then greens, then blues.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->matte=MagickFalse;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block ("8BIM" keyed resources).
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
        &has_merged_image);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      /* caller only wants the first scene and a composite exists */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if (has_merged_image != MagickFalse || imageListLength == 1)
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      /* no composite and no layers yet: re-parse the layer block fully */
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* synthesize the composite by flattening the layers */
      image->background_color.opacity=TransparentOpacity;
      (void) SetImageBackgroundColor(image);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* attach the resource-block profile to every image in the list */
      next=image;
      while (next != (Image *) NULL)
      {
        (void) SetImageProfile(next,GetStringInfoName(profile),profile);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r P S D I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to
%  the list of supported formats.  The properties include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /* PSB: the large-document (version 2) variant */
  entry=SetMagickInfo("PSB");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString("Adobe Large Document Format");
  entry->module=ConstantString("PSD");
  (void) RegisterMagickInfo(entry);
  entry=SetMagickInfo("PSD");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString("Adobe Photoshop bitmap");
  entry->module=ConstantString("PSD");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
*/

/* Write a PSD offset field: 16-bit for version 1 (PSD), 32-bit for PSB. */
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/*
  Patch a previously written offset field at 'offset' in the blob, then
  restore the current write position.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/* Write a PSD size field: 32-bit for version 1 (PSD), 64-bit for PSB. */
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBLong(image,(unsigned int) size));
  return(WriteBlobMSBLongLong(image,size));
}

/*
  Patch a previously written size field at 'offset' in the blob, then
  restore the current write position.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBLong(image,(unsigned int) size);
  else
result=WriteBlobMSBLongLong(image,size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  Packbits-compress 'length' bytes from 'pixels' into 'compact_pixels' and
  return the number of compacted bytes written.  'compact_pixels' must be
  large enough for the worst case (see AcquireCompactPixels()).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* scratch buffer for literal runs (max 127 literals + count byte) */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* single trailing byte: literal run of one */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* (256-3)+1 is the two's-complement run count -2 */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  Write the compression tag for the next channel block; for RLE also write
  a zero-filled per-row size table that is patched later by WritePSDOffset().
  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  ssize_t
    i,
    offset,
    y;

  if (next_image->compression == RLECompression)
    {
      offset=WriteBlobMSBShort(image,RLE);
      /* placeholder row-size table; real sizes are back-patched */
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          offset+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else
    if (next_image->compression == ZipCompression)
      offset=WriteBlobMSBShort(image,ZipWithoutPrediction);
#endif
  else
    offset=WriteBlobMSBShort(image,Raw);
  return((size_t) offset);
}

/*
  Write one channel of 'next_image' to the blob using the image's
  compression (Raw, RLE or - when zlib is available - Zip).  When
  'separate' is set each channel carries its own compression header.
  Returns the number of bytes written.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const PixelPacket
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression tag we are about to write */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  /* 1-bit data is written inverted; see the per-row loop below */
  monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ?
MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1-9 maps directly onto the zlib compression level */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,&image->exception);
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
        count+=WriteBlob(image,length,compact_pixels);
        /* back-patch this row's entry in the RLE size table */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else
      if (next_image->compression == ZipCompression)
        {
          stream.avail_in=(uInt) length;
          stream.next_in=(Bytef *) pixels;
          if (y == (ssize_t) next_image->rows-1)
            flush=Z_FINISH;
          do
          {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
          } while (stream.avail_out == 0);
        }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/*
  Allocate a worst-case buffer for Packbits output; returns NULL (with an
  exception set on the image) on allocation failure.
*/
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  /* 9*columns+1 over-allocates for the Packbits worst case */
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        image->filename);
    }
  return(compact_pixels);
}

/*
  Write all channels of 'next_image' (index or gray or RGB/CMYK, plus
  optional alpha and opacity mask) and return the total bytes written.
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    length,
    offset_length;

  ssize_t
    count;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* composite write: count the channels the pixel data will need */
      if ((next_image->storage_class != PseudoClass) ||
          (IsGrayImage(next_image,&next_image->exception) != MagickFalse))
        {
          if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->matte != MagickFalse)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t)
        channels);
      /* bytes per channel in the RLE row-size table */
      offset_length=(next_image->rows*(psd_info->version == 1 ?
2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsGrayImage(next_image,&next_image->exception) == MagickFalse))
    {
      /* colormapped image: a single index channel */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before and after writing */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateImage(next_image,MagickFalse);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->matte != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* undo the CMYK negation applied above */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateImage(next_image,MagickFalse);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* an opacity mask stored in the registry is written as an
         extra channel */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            &image->exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}

/*
  Write 'value' as a Pascal string (length byte + characters), padded with
  zero bytes to a multiple of 'padding'.  Returns the bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL ) ?
255UL : strlen(value);
  if (length ==  0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* include the length byte when computing the padding */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/*
  Emit the 8BIM resolution resource (id 0x03ED): resolution is written as
  16.16 fixed point in pixels per inch.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* convert cm resolution to inches (2.54 cm/inch), 16.16 fixed point */
      x_resolution=2.54*65536.0*image->x_resolution+0.5;
      y_resolution=2.54*65536.0*image->y_resolution+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->x_resolution+0.5;
      y_resolution=65536.0*image->y_resolution+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16);  /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);  /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);  /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* height unit */
}

/*
  Write a channel-info record (id + zero size placeholder) for 'channel';
  the size is patched later with WritePSDSize().
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  ssize_t
    count;

  count=WriteBlobMSBSignedShort(image,channel);
  count+=SetPSDSize(psd_info,image,0);
  return((size_t) count);
}

/*
  Remove the ICC profile resource (id 0x040F) from an 8BIM resource block
  in place, shrinking the StringInfo accordingly.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* 12 = resource header size; PSDQuantum pads to an even length */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resources are padded to even byte boundaries */
  }
}

/*
  Remove the resolution resource (id 0x03ED) from an 8BIM resource block
  in place (the coder writes its own; see WriteResolutionResourceBlock()).
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resources are padded to even byte boundaries */
  }
}

/*
  Return the "psd:additional-info" profile filtered according to the
  psd:additional-info image option: "all" keeps everything, "selective"
  keeps only whitelisted keys, anything else drops the profile.  The
  caller takes ownership of the returned StringInfo.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* each record: 4-byte signature, 4-char key, 4-byte big-endian size */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* not whitelisted: splice this record out of the buffer */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info);
  return(profile);
}

static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image)
{
  char
    layer_name[MaxTextExtent];

  const char
    *property;

  const StringInfo
    *icc_profile,
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  PSDInfo
psd_info; register ssize_t i; size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->matte != MagickFalse) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,&image->exception) != MagickFalse)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType) && (image->storage_class == PseudoClass)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass); if (image->colorspace != CMYKColorspace) num_channels=(image->matte != MagickFalse ? 4UL : 3UL); else num_channels=(image->matte != MagickFalse ? 
5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsGrayImage(image,&image->exception) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsGrayImage(image,&image->exception) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } base_image=GetNextImageInList(image); if (base_image == (Image *)NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); (void) SetPSDSize(&psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->matte != MagickFalse) size+=WriteBlobMSBShort(image,-(unsigned short) layer_count); else size+=WriteBlobMSBShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for 
(next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y); size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+ next_image->rows)); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->matte != MagickFalse) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobMSBShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(&psd_info,image,(signed short) i); if (next_image->matte != MagickFalse) size+=WriteChannelSize(&psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(&psd_info,image,-2); size+=WriteBlob(image,4,(const unsigned char *) "8BIM"); size+=WriteBlob(image,4,(const unsigned char *) CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue, &image->exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(unsigned char) (next_image->compose == NoCompositeOp ? 
1 << 0x02 : 1)); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image); property=(const char *) GetImageProperty(next_image,"label"); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobMSBLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobMSBLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,&image->exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobMSBLong(image,20); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(unsigned char) ( mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobMSBLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! 
*/ next_image=base_image; layer_index=0; while (next_image != NULL) { length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } /* Write the total size */ size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(&psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0, MagickFalse) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
/* ---- begin file: GB_binop__min_int8.c ---- */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this is generator output specialized for the MIN operator on
// int8_t; keep any change in the Generator/ sources, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__min_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__min_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__min_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__min_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__min_int8)
// A*D function (colscale):         GB (_AxD__min_int8)
// D*A function (rowscale):         GB (_DxB__min_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__min_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__min_int8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__min_int8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__min_int8)
// C=scalar+B                       GB (_bind1st__min_int8)
// C=scalar+B'                      GB (_bind1st_tran__min_int8)
// C=A+scalar                       GB (_bind2nd__min_int8)
// C=A'+scalar                      GB (_bind2nd_tran__min_int8)

// C type:   int8_t
// A type:   int8_t
// A pattern?  0
// B type:   int8_t
// B pattern?  0

// BinaryOp: cij = GB_IMIN (aij, bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IMIN (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MIN || GxB_NO_INT8 || GxB_NO_MIN_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__min_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__min_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only defined for eWiseUnion (fill values for entries
    // present in just one of A or B)
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__min_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__min_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__min_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap pattern of B
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__min_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_IMIN (x, aij) ;                \
}

GrB_Info GB (_bind1st_tran__min_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_IMIN (aij, y) ;                \
}

GrB_Info GB (_bind2nd_tran__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ---- begin file: hello.c ---- */
#include <stdio.h> #include <omp.h> int main(int argc, char *argv[]) { int tid, nthreads; printf("Hello world!\n"); #pragma omp parallel private(tid) shared(nthreads) { tid = omp_get_thread_num(); #pragma omp single nthreads = omp_get_num_threads(); #pragma omp critical printf(" ... from thread ID %i.\n", tid); } printf("There were %i threads in total.\n", nthreads); return 0; }
/* ---- begin file: bug_nested_proxy_task.c ---- */
// RUN: %libomp-compile-and-run

// The runtime currently does not get dependency information from GCC.
// UNSUPPORTED: gcc
// REQUIRES: !abt

// Very flaky on openmp-clang-x86_64-linux-debian.
// https://bugs.llvm.org/show_bug.cgi?id=45397
// UNSUPPORTED: linux

#include <stdio.h>
#include <omp.h>
#include <pthread.h>
#include "omp_my_sleep.h"

/*
 With task dependencies one can generate proxy tasks from an explicit task
 being executed by a serial task team. The OpenMP runtime library didn't
 expect that and tries to free the explicit task that is the parent of the
 proxy task still working in background. It therefore has incomplete children
 which triggers a debugging assertion.
*/

// Compiler-generated code (emulation)
// These typedefs/structs mirror the libomp-internal ABI so the test can call
// the __kmpc_* entry points directly, as a compiler would.
typedef long kmp_intptr_t;
typedef int kmp_int32;
typedef char bool;

typedef struct ident {
    kmp_int32 reserved_1;   /**< might be used in Fortran; see above */
    kmp_int32 flags;        /**< also f.flags; KMP_IDENT_xxx flags;
                                 KMP_IDENT_KMPC identifies this union member */
    kmp_int32 reserved_2;   /**< not really used in Fortran any more; see above */
#if USE_ITT_BUILD
    /* but currently used for storing region-specific ITT */
    /* contextual information. */
#endif /* USE_ITT_BUILD */
    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for C++ */
    char const *psource;    /**< String describing the source location.
                                 The string is composed of semi-colon separated
                                 fields which describe the source file, the
                                 function and a pair of line numbers that
                                 delimit the construct. */
} ident_t;

typedef struct kmp_depend_info {
    kmp_intptr_t base_addr;
    size_t len;
    struct {
        bool in:1;
        bool out:1;
    } flags;
} kmp_depend_info_t;

struct kmp_task;
typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * );

typedef struct kmp_task {
    /* GEH: Shouldn't this be aligned somehow? */
    void *              shareds;  /**< pointer to block of pointers to shared vars */
    kmp_routine_entry_t routine;  /**< pointer to routine to call for executing task */
    kmp_int32           part_id;  /**< part id for the task */
} kmp_task_t;

#ifdef __cplusplus
extern "C" {
#endif
kmp_int32  __kmpc_global_thread_num ( ident_t * );
kmp_task_t* __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 flags, size_t sizeof_kmp_task_t,
                                   size_t sizeof_shareds,
                                   kmp_routine_entry_t task_entry );
void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask );
kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid,
                                      kmp_task_t * new_task, kmp_int32 ndeps,
                                      kmp_depend_info_t *dep_list,
                                      kmp_int32 ndeps_noalias,
                                      kmp_depend_info_t *noalias_dep_list );
kmp_int32 __kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid,
                           kmp_task_t * new_task );
#ifdef __cplusplus
}
#endif

// Emulated "device" work: sleep briefly, then signal the runtime that the
// proxy task has completed (out-of-order completion path).
void *target(void *task)
{
    my_sleep( 0.1 );
    __kmpc_proxy_task_completed_ooo((kmp_task_t*) task);
    return NULL;
}

pthread_t target_thread;

// User's code
// Entry routine of each proxy task: hand the task off to a background
// pthread that will complete it asynchronously.
int task_entry(kmp_int32 gtid, kmp_task_t *task)
{
    pthread_create(&target_thread, NULL, &target, task);
    return 0;
}

int main()
{
    int dep;

#pragma omp taskgroup
    {
    /*
     * Corresponds to:
     *   #pragma omp target nowait depend(out: dep)
     *   {
     *     my_sleep( 0.1 );
     *   }
     */
    kmp_depend_info_t dep_info;
    dep_info.base_addr = (long) &dep;
    dep_info.len = sizeof(int);
    // out = inout per spec and runtime expects this
    dep_info.flags.in = 1;
    dep_info.flags.out = 1;

    kmp_int32 gtid = __kmpc_global_thread_num(NULL);
    // NOTE(review): 17 is the task-flags word (presumably tied | proxy bits
    // from kmp.h) — confirm against the runtime headers before changing.
    kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry);
    __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL);

    // This explicit task depends on the proxy task above; it then spawns a
    // *nested* proxy task from within itself — the scenario that used to
    // trip the runtime's child-counting assertion.
#pragma omp task depend(in: dep)
    {
        /*
         * Corresponds to:
         *   #pragma omp target nowait
         *   {
         *     my_sleep( 0.1 );
         *   }
         */
        kmp_task_t *nested_proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry);
        __kmpc_omp_task(NULL,gtid,nested_proxy_task);
    }
    }

    // only check that it didn't crash
    return 0;
}
invert.c
/* Copyright 2016. The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2016 Jon Tamir <jtamir@eecs.berkeley.edu> */ #include <stdlib.h> #include <assert.h> #include <complex.h> #include <stdio.h> #include "num/multind.h" #include "num/init.h" #include "misc/mmio.h" #include "misc/misc.h" #ifndef DIMS #define DIMS 16 #endif static const char usage_str[] = "<input> <output>"; static const char help_str[] = "Invert array (1 / <input>). The output is set to zero in case of divide by zero.\n"; int main_invert(int argc, char* argv[]) { mini_cmdline(&argc, argv, 2, usage_str, help_str); num_init(); long dims[DIMS]; complex float* idata = load_cfl(argv[1], DIMS, dims); complex float* odata = create_cfl(argv[2], DIMS, dims); #pragma omp parallel for for (long i = 0; i < md_calc_size(DIMS, dims); i++) odata[i] = idata[i] == 0 ? 0. : 1. / idata[i]; unmap_cfl(DIMS, dims, idata); unmap_cfl(DIMS, dims, odata); return 0; }
GB_binop__eq_bool.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):    GB (_AaddB__eq_bool)
// A.*B function (eWiseMult):  GB (_AemultB)
// A.*B function (eWiseMult):  GB (_AemultB_02__eq_bool)
// A.*B function (eWiseMult):  GB (_AemultB_03__eq_bool)
// A.*B function (eWiseMult):  GB (_AemultB_bitmap__eq_bool)
// A*D function (colscale):    GB (_AxD__eq_bool)
// D*A function (rowscale):    GB (_DxB__eq_bool)
// C+=B function (dense accum):   GB (_Cdense_accumB__eq_bool)
// C+=b function (dense accum):   GB (_Cdense_accumb__eq_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__eq_bool)
// C=scalar+B    GB (_bind1st__eq_bool)
// C=scalar+B'   GB (_bind1st_tran__eq_bool)
// C=A+scalar    GB (_bind2nd__eq_bool)
// C=A'+scalar   GB (_bind2nd_tran__eq_bool)

// C type:   bool
// A type:   bool
// B,b type: bool

// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    bool bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_BOOL || GxB_NO_EQ_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// EQ is not in that list, so this kernel is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the braced block above always returns.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek-slice workspaces used by the template; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__eq_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (EQ is commutative, so GB_BINOP_FLIP is 0 here.)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_bool)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_bool)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap/pattern of B (NULL if B is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap/pattern Bb
        if (!GBB (Bb, p)) continue ;
        bool bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_bool)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap/pattern of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap/pattern Ab
        if (!GBB (Ab, p)) continue ;
        bool aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    bool aij = Ax [pA] ; \
    Cx [pC] = (x == aij) ; \
}

GrB_Info GB (_bind1st_tran__eq_bool)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent template inclusions
    #undef GB_ATYPE
    #define GB_ATYPE \
    bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    bool aij = Ax [pA] ; \
    Cx [pC] = (aij == y) ; \
}

GrB_Info GB (_bind2nd_tran__eq_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bget_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):    GB (_AaddB__bget_int32)
// A.*B function (eWiseMult):  GB (_AemultB_08__bget_int32)
// A.*B function (eWiseMult):  GB (_AemultB_02__bget_int32)
// A.*B function (eWiseMult):  GB (_AemultB_04__bget_int32)
// A.*B function (eWiseMult):  GB (_AemultB_bitmap__bget_int32)
// A*D function (colscale):    GB ((none))
// D*A function (rowscale):    GB ((none))
// C+=B function (dense accum):   GB (_Cdense_accumB__bget_int32)
// C+=b function (dense accum):   GB (_Cdense_accumb__bget_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__bget_int32)
// C=scalar+B    GB (_bind1st__bget_int32)
// C=scalar+B'   GB (_bind1st_tran__bget_int32)
// C=A+scalar    GB (_bind2nd__bget_int32)
// C=A'+scalar   GB (_bind2nd_tran__bget_int32)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t

// BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITGET (x, y, int32_t, 32) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BGET is not in that list, so this kernel is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bget_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the braced block above always returns.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// colscale is not generated for the BGET operator (compiled out)

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// rowscale is not generated for the BGET operator (compiled out)

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bget_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // ek-slice workspaces used by the template; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bget_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        // (GB_BINOP_FLIP is 1 for BGET, so this branch is compiled in.)
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bget_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bget_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap/pattern of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap/pattern Bb
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bget_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap/pattern of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap/pattern Ab
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \
}

GrB_Info GB (_bind1st_tran__bget_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent template inclusions
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \
}

GrB_Info GB (_bind2nd_tran__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pi-v21.c
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #include "extrae_user_events.h" #define PROGRAM 1000 #define PI_COMPUTATION 1 #define FINAL_PI 2 #define END 0 #endif int main(int argc, char *argv[]) { double x, sum=0.0, pi=0.0; #if _DEBUG_ double start,end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0/(double) num_steps; #if _DEBUG_ start= omp_get_wtime(); #endif /* do computation -- using just two threads */ // WARNING : correct code #pragma omp parallel #pragma omp single { #if _DEBUG_ int id = omp_get_thread_num(); #endif for (i=0; i < num_steps; i++) { #pragma omp task private(x) firstprivate(i) shared(sum) { #if !_DEBUG_ Extrae_event (PROGRAM, PI_COMPUTATION); #endif x = (i+0.5)*step; #pragma omp atomic sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #else Extrae_event (PROGRAM, END); #endif } } #pragma omp taskwait #pragma omp task #if !_DEBUG_ { Extrae_event (PROGRAM, FINAL_PI); #endif pi = step * sum; #if !_DEBUG_ Extrae_event (PROGRAM, END); } #endif } #if _DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end-start); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal
  * Single access point for Eigen's thread-count setting.
  * SetAction stores *v into a function-local static; GetAction reads it back,
  * falling back to omp_get_max_threads() (or 1 without OpenMP) when it was
  * never set.  NOTE(review): the static is not synchronized — concurrent
  * Set/Get from multiple threads is presumably expected to be benign here. */
inline void manage_multi_threading(Action action, int* v)
{
  static int m_maxThreads = -1;
  EIGEN_UNUSED_VARIABLE(m_maxThreads);

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be call first when calling Eigen from multiple threads */
inline void initParallel()
{
  // Touch both lazily-initialized statics (thread count and cache sizes)
  // so their first initialization does not race later.
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

// Per-thread bookkeeping for the parallel GEMM: which slice of the lhs this
// thread owns, plus volatile sync/users fields used for cross-thread
// coordination by the product kernel.
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  Index volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

/** \internal
  * Run the product functor either sequentially or split column-wise (or
  * row-wise when \a transpose) across an OpenMP team.  The functor's
  * interface and results are unchanged; only the work partitioning varies. */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redisigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000;  // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  // Exceptions must not escape the omp region; count them and report after.
  int errorCount = 0;
  #pragma omp parallel num_threads(threads) reduction(+: errorCount)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of request ones.
    Index actual_threads = omp_get_num_threads();

    // Column blocks rounded down to a multiple of 4, row blocks to a
    // multiple of the kernel's mr; the last thread absorbs the remainder.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    EIGEN_TRY
    {
      if(transpose) func(c0, actualBlockCols, 0, rows, info);
      else          func(0, rows, c0, actualBlockCols, info);
    } EIGEN_CATCH(...) { ++errorCount; }
  }
  if (errorCount) printf("assert exception\n"); //EIGEN_THROW_X(Eigen::eigen_assert_exception());
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
shallow_water_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Miguel Maso Sotomayor
//

#ifndef KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED
#define KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED

// System includes

// External includes

// Project includes
#include "includes/model_part.h"

namespace Kratos
{
///@addtogroup ShallowWaterApplication
///@{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/// Utility operations over a ModelPart for the shallow-water solvers:
/// derived-variable computation, wet/dry classification and mesh helpers.
/** Detail class definition.
 */
class KRATOS_API(SHALLOW_WATER_APPLICATION) ShallowWaterUtilities
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ShallowWaterUtilities
    KRATOS_CLASS_POINTER_DEFINITION(ShallowWaterUtilities);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.

    /// Destructor.

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// @brief Compute the free-surface elevation field over the model part.
    void ComputeFreeSurfaceElevation(ModelPart& rModelPart);

    /// @brief Recover the water height from the free-surface elevation.
    void ComputeHeightFromFreeSurface(ModelPart& rModelPart);

    /// @brief Compute the velocity field over the model part.
    void ComputeVelocity(ModelPart& rModelPart);

    /// @brief Compute the momentum field over the model part.
    void ComputeMomentum(ModelPart& rModelPart);

    /// @brief Compute nodal accelerations over the model part.
    void ComputeAccelerations(ModelPart& rModelPart);

    /// @brief Copy a scalar variable with flipped sign into another variable.
    /// NOTE(review): exact semantics live in the .cpp — confirm against it.
    void FlipScalarVariable(Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable, ModelPart& rModelPart);

    /// @brief Flag nodes/conditions that form the solid boundary for the
    /// given mean sea level.
    void IdentifySolidBoundary(ModelPart& rModelPart, double SeaWaterLevel, Flags SolidBoundaryFlag);

    /// @brief Mark the wet part of the domain; Thickness is the wet/dry
    /// transition tolerance.
    void IdentifyWetDomain(ModelPart& rModelPart, Flags WetFlag, double Thickness = 0.0);

    /// @brief Reset solution values on the dry part of the domain.
    void ResetDryDomain(ModelPart& rModelPart, double Thickness = 0.0);

    /// @brief Set ACTIVE on each entity according to its WetFlag state.
    /// Entities not flagged wet are deactivated. Thread-parallel over the
    /// container (index-based loop so OpenMP can split it).
    template<class TContainerType>
    void DeactivateDryEntities(TContainerType& rContainer, Flags WetFlag)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rContainer.size()); ++i)
        {
            auto it = rContainer.begin() + i;
            it->Set(ACTIVE, it->Is(WetFlag));
        }
    }

    /// @brief Normalize a nodal vector variable over the model part.
    void NormalizeVector(ModelPart& rModelPart, Variable<array_1d<double,3>>& rVariable);

    /// @brief Copy the current value of rVariable into its previous
    /// time-step buffer (step 1) for every node. Thread-parallel.
    template<class TVarType>
    void CopyVariableToPreviousTimeStep(ModelPart& rModelPart, const TVarType& rVariable)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rModelPart.NumberOfNodes()); ++i)
        {
            auto const it_node = rModelPart.NodesBegin() + i;
            it_node->FastGetSolutionStepValue(rVariable,1) = it_node->FastGetSolutionStepValue(rVariable);
        }
    }

    /// @brief Clamp a nodal scalar variable from below by MinValue.
    void SetMinimumValue(ModelPart& rModelPart, const Variable<double>& rVariable, double MinValue);

    /*
     * @brief This method sets the z-coordinate of the mesh to zero
     */
    void SetMeshZCoordinateToZero(ModelPart& rModelPart);

    /*
     * @brief This method moves the z-coordinate of the mesh according to a variable
     */
    void SetMeshZCoordinate(ModelPart& rModelPart, const Variable<double>& rVariable);

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    ///@}
    ///@name Friends
    ///@{

    ///@}

}; // Class ShallowWaterUtilities

///@}
///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

///@}

///@} addtogroup block

}  // namespace Kratos.

#endif // KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED  defined
convolution_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

// 2x2 convolution, stride 1, NEON-accelerated.
// Layout of _kernel: 4 floats per (output channel p, input channel q) pair
// at offset p*inch*4 + q*4, ordered [k(0,0) k(0,1) k(1,0) k(1,1)].
// Input channels are consumed two at a time (main loop) with a scalar tail.
// NOTE(review): the trailing "r += 1" after each output row assumes
// outw == w - 1 (valid 2x2/s1 convolution) — confirm against the caller.
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // One output channel per OpenMP thread.
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        int q = 0;

        // ---- main loop: accumulate two input channels at a time ----
        for (; q+1<inch; q+=2)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);

            const float* kernel0 = kernel + p*inch*4 + q*4;
            const float* kernel1 = kernel0 + 4;

            // r00/r01: top/bottom rows of channel q; r10/r11: of channel q+1.
            const float* r00 = img0;
            const float* r01 = img0 + w;
            const float* r10 = img1;
            const float* r11 = img1 + w;

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(kernel0);
            float32x4_t _k1 = vld1q_f32(kernel1);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                // 4 outputs per asm iteration; scalar tail below.
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                // v8/v9 accumulate the two channel contributions; ext builds
                // the shifted-by-one columns; final fadd merges and stores.
                asm volatile(
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v2.4s}, [%2], #16 \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v12.4s}, [%3], #16 \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld1 {v14.4s}, [%4], #16 \n"
                    "0: \n"
                    "prfm pldl1keep, [%5, #128] \n"
                    "ld1 {v9.4s}, [%5] \n"
                    "fmul v8.4s, v0.4s, %12.s[0] \n"
                    "fmla v9.4s, v2.4s, %12.s[2] \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v1.4s}, [%1], #16 \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v3.4s}, [%2], #16 \n"
                    "ext v10.16b, v0.16b, v1.16b, #4 \n"
                    "ext v11.16b, v2.16b, v3.16b, #4 \n"
                    "fmla v8.4s, v12.4s, %13.s[0] \n"
                    "fmla v9.4s, v14.4s, %13.s[2] \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v13.4s}, [%3], #16 \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld1 {v15.4s}, [%4], #16 \n"
                    "fmla v8.4s, v10.4s, %12.s[1] \n"
                    "fmla v9.4s, v11.4s, %12.s[3] \n"
                    "ext v10.16b, v12.16b, v13.16b, #4 \n"
                    "ext v11.16b, v14.16b, v15.16b, #4 \n"
                    "fmla v8.4s, v10.4s, %13.s[1] \n"
                    "fmla v9.4s, v11.4s, %13.s[3] \n"
                    "orr v0.16b, v1.16b, v1.16b \n"
                    "orr v2.16b, v3.16b, v3.16b \n"
                    "fadd v8.4s, v8.4s, v9.4s \n"
                    "orr v12.16b, v13.16b, v13.16b \n"
                    "orr v14.16b, v15.16b, v15.16b \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v8.4s}, [%5], #16 \n"
                    "bne 0b \n"
                    "sub %1, %1, #16 \n"
                    "sub %2, %2, #16 \n"
                    "sub %3, %3, #16 \n"
                    "sub %4, %4, #16 \n"
                    : "=r"(nn), // %0
                    "=r"(r00), // %1
                    "=r"(r01), // %2
                    "=r"(r10), // %3
                    "=r"(r11), // %4
                    "=r"(outptr) // %5
                    : "0"(nn),
                    "1"(r00),
                    "2"(r01),
                    "3"(r10),
                    "4"(r11),
                    "5"(outptr),
                    "w"(_k0), // %12
                    "w"(_k1) // %13
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                // ARMv7 variant of the same 4-wide kernel (q8/q9 accumulate).
                asm volatile(
                    "pld [%1, #128] \n"
                    "vld1.f32 {d0-d1}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d4-d5}, [%2]! \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d24-d25}, [%3]! \n"
                    "pld [%4, #128] \n"
                    "vld1.f32 {d28-d29}, [%4]! \n"
                    "0: \n"
                    "pld [%5, #128] \n"
                    "vld1.f32 {d18-d19}, [%5] \n"// q9 = sum
                    "vmul.f32 q8, q0, %e12[0] \n"
                    "vmla.f32 q9, q2, %f12[0] \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d2-d3}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d6-d7}, [%2]! \n"
                    "vext.f32 q10, q0, q1, #1 \n"
                    "vext.f32 q11, q2, q3, #1 \n"
                    "vmla.f32 q8, q12, %e13[0] \n"
                    "vmla.f32 q9, q14, %f13[0] \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d26-d27}, [%3]! \n"
                    "pld [%4, #128] \n"
                    "vld1.f32 {d30-d31}, [%4]! \n"
                    "vmla.f32 q8, q10, %e12[1] \n"
                    "vmla.f32 q9, q11, %f12[1] \n"
                    "vext.f32 q10, q12, q13, #1 \n"
                    "vext.f32 q11, q14, q15, #1 \n"
                    "vmla.f32 q8, q10, %e13[1] \n"
                    "vmla.f32 q9, q11, %f13[1] \n"
                    "vorr q0, q1, q1 \n"
                    "vorr q2, q3, q3 \n"
                    "vadd.f32 q8, q8, q9 \n"
                    "vorr q12, q13, q13 \n"
                    "vorr q14, q15, q15 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d16-d17}, [%5]! \n"
                    "bne 0b \n"
                    "sub %1, #16 \n"
                    "sub %2, #16 \n"
                    "sub %3, #16 \n"
                    "sub %4, #16 \n"
                    : "=r"(nn), // %0
                    "=r"(r00), // %1
                    "=r"(r01), // %2
                    "=r"(r10), // %3
                    "=r"(r11), // %4
                    "=r"(outptr) // %5
                    : "0"(nn),
                    "1"(r00),
                    "2"(r01),
                    "3"(r10),
                    "4"(r11),
                    "5"(outptr),
                    "w"(_k0), // %12
                    "w"(_k1) // %13
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar tail: one output at a time over the 2x2 window of
                // both input channels.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r00 = vld1_f32(r00);
                    float32x2_t _r01 = vld1_f32(r01);
                    float32x4_t _r00r1 = vcombine_f32(_r00, _r01);
                    float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);
                    float32x2_t _r10 = vld1_f32(r10);
                    float32x2_t _r11 = vld1_f32(r11);
                    float32x4_t _r10r1 = vcombine_f32(_r10, _r11);
                    _s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);
                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;
                    sum += r00[0] * kernel0[0];
                    sum += r00[1] * kernel0[1];
                    sum += r01[0] * kernel0[2];
                    sum += r01[1] * kernel0[3];
                    sum += r10[0] * kernel1[0];
                    sum += r10[1] * kernel1[1];
                    sum += r11[0] * kernel1[2];
                    sum += r11[1] * kernel1[3];

                    *outptr += sum;
#endif // __ARM_NEON
                    r00 += 1;
                    r01 += 1;
                    r10 += 1;
                    r11 += 1;
                    outptr++;
                }

                // Skip the last input column of the row (window is 2 wide).
                r00 += 1;
                r01 += 1;
                r10 += 1;
                r11 += 1;
            }
        }

        // ---- tail: odd remaining input channel ----
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*4 + q*4;

            const float* r0 = img0;
            const float* r1 = img0 + w;

#if __ARM_NEON
            // One kernel tap broadcast per register for the 4-wide asm loop.
            float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
            float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
            float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
            float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v2.4s}, [%2], #16 \n"
                    "0: \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v9.4s}, [%3] \n"
                    "fmul v8.4s, v0.4s, %8.4s \n"
                    "fmla v9.4s, v2.4s, %10.4s \n"
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v1.4s}, [%1], #16 \n"
                    "ext v10.16b, v0.16b, v1.16b, #4 \n"
                    "fmla v8.4s, v10.4s, %9.4s \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v3.4s}, [%2], #16 \n"
                    "ext v11.16b, v2.16b, v3.16b, #4 \n"
                    "fmla v9.4s, v11.4s, %11.4s \n"
                    "orr v0.16b, v1.16b, v1.16b \n"
                    "fadd v8.4s, v8.4s, v9.4s \n"
                    "orr v2.16b, v3.16b, v3.16b \n"
                    "subs %w0, %w0, #1 \n"
                    "st1 {v8.4s}, [%3], #16 \n"
                    "bne 0b \n"
                    "sub %1, %1, #16 \n"
                    "sub %2, %2, #16 \n"
                    : "=r"(nn), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(outptr) // %3
                    : "0"(nn),
                    "1"(r0),
                    "2"(r1),
                    "3"(outptr),
                    "w"(_k0), // %8
                    "w"(_k1), // %9
                    "w"(_k2), // %10
                    "w"(_k3) // %11
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld [%1, #128] \n"
                    "vld1.f32 {d0-d1}, [%1]! \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d4-d5}, [%2]! \n"
                    "0: \n"
                    "pld [%3, #128] \n"
                    "vld1.f32 {d18-d19}, [%3] \n"// q9 = sum
                    "vmul.f32 q8, q0, %q8 \n"
                    "vmla.f32 q9, q2, %q10 \n"
                    "pld [%1, #128] \n"
                    "vld1.f32 {d2-d3}, [%1]! \n"
                    "vext.f32 q10, q0, q1, #1 \n"
                    "vmla.f32 q8, q10, %q9 \n"
                    "pld [%2, #128] \n"
                    "vld1.f32 {d6-d7}, [%2]! \n"
                    "vext.f32 q11, q2, q3, #1 \n"
                    "vmla.f32 q9, q11, %q11 \n"
                    "vorr q0, q1, q1 \n"
                    "vadd.f32 q8, q8, q9 \n"
                    "vorr q2, q3, q3 \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d16-d17}, [%3]! \n"
                    "bne 0b \n"
                    "sub %1, #16 \n"
                    "sub %2, #16 \n"
                    : "=r"(nn), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(outptr) // %3
                    : "0"(nn),
                    "1"(r0),
                    "2"(r1),
                    "3"(outptr),
                    "w"(_k0), // %8
                    "w"(_k1), // %9
                    "w"(_k2), // %10
                    "w"(_k3) // %11
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
#if __ARM_NEON
                // Packed [k00 k01 k10 k11] for the scalar-tail NEON path.
                float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r0 = vld1_f32(r0);
                    float32x2_t _r1 = vld1_f32(r1);
                    float32x4_t _r0r1 = vcombine_f32(_r0, _r1);
                    float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);
                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;
                    sum += r0[0] * kernel0[0];
                    sum += r0[1] * kernel0[1];
                    sum += r1[0] * kernel0[2];
                    sum += r1[1] * kernel0[3];

                    *outptr += sum;
#endif
                    r0 += 1;
                    r1 += 1;
                    outptr++;
                }

                r0 += 1;
                r1 += 1;
            }
        }
    }
}
jacobian_calculation.h
#ifndef JACOBIAN_CALCULATION_H #define JACOBIAN_CALCULATION_H #include "neural_nets\training_options.h" namespace neural_nets { namespace detail { template <typename T, typename sys_type> boost::numeric::ublas::matrix<T> calc_jacobian_numerically(sys_type const &sys_, boost::numeric::ublas::matrix<T> const &inputs_, lm_options<T> const &options_) { size_t out_cnt = sys_.get_output_count(); std::vector<T> weights(sys_.get_parameter_count()); sys_.get_parameters(weights.begin(), weights.end()); boost::numeric::ublas::matrix<T> jacobian(inputs_.size1()*sys_.get_output_count(), sys_.get_parameter_count()); auto jacobian_for_body = [&](size_t i) { sys_type sys(sys_); sys_type tmp_sys(sys_); std::vector<T> tmp_weights = weights, out_before(out_cnt), out_after(out_cnt); T epsilon = detail::math_utils::calc_optimal_epsilon(tmp_weights[i]); tmp_weights[i] -= epsilon; tmp_sys.set_parameters(tmp_weights.begin(), tmp_weights.end()); for (size_t j = 0; j < inputs_.size1(); ++j) { sys(std::next(inputs_.begin1(), j).begin(), std::next(inputs_.begin1(), j).end(), out_before.begin(), out_before.end()); tmp_sys(std::next(inputs_.begin1(), j).begin(), std::next(inputs_.begin1(), j).end(), out_after.begin(), out_after.end()); for (size_t k = 0; k < out_cnt; ++k) { jacobian(j*out_cnt + k, i) = (out_before[k] - out_after[k]) / epsilon; } } }; if (options_.use_parallelization) { #pragma omp parallel for for (size_t i = 0; i < jacobian.size2(); ++i) { jacobian_for_body(i); } } else { for (size_t i = 0; i < jacobian.size2(); ++i) { jacobian_for_body(i); } } return jacobian; } } } #endif
GB_unaryop__lnot_uint16_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint16_int32
// op(A') function:  GB_tran__lnot_uint16_int32

// C type:   uint16_t
// A type:   int32_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    int32_t

// type of the C matrix entries
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT: 1 if aij is zero, 0 otherwise)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint16_int32
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Element-wise apply over all anz entries, statically scheduled.
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is supplied by the shared template, which
    // expands the GB_* macros defined above.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
6319.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose

/*
 * 2-D FDTD kernel (PolyBench fdtd-2d), 16x16-tiled with OpenMP-parallel
 * updates of the ex and hz fields.  Per time step: the first ey row is
 * driven by the source _fict_, then ey, ex and hz are updated in turn.
 */
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
  int t;   /* time step */
  int i;   /* row tile base / row index */
  int ii;  /* row index within tile */
  int j;   /* column tile base / column index */
  int jj;  /* column index within tile */
  for (t = 0; t <= tmax - 1; t += 1) {
    /* source injection into the first ey row */
    for (j = 0; j <= ny - 1; j += 1)
      ey[0][j] = _fict_[t];
    /* ey update (rows 1..nx-1), sequential */
    for (i = 1; i <= nx - 1; i += 1)
      for (j = 0; j <= ny - 1; j += 1)
        ey[i][j] = ey[i][j] - 0.5 * (hz[i][j] - hz[i - 1][j]);
    /* ex update, 16x16 tiles, rows parallel across threads */
#pragma omp parallel for private(i,ii,j,jj)
    for (i = 0; i <= nx - 1; i += 16) {
      int row_end = (i + 15 < nx - 1 ? i + 15 : nx - 1);
      for (ii = i; ii <= row_end; ii += 1) {
        for (j = 1; j <= ny - 1; j += 16) {
          int col_end = (ny - 1 < j + 15 ? ny - 1 : j + 15);
          for (jj = j; jj <= col_end; jj += 1)
            ex[ii][jj] = ex[ii][jj] - 0.5 * (hz[ii][jj] - hz[ii][jj - 1]);
        }
      }
    }
    /* hz update, 16x16 tiles, rows parallel across threads */
#pragma omp parallel for private(i,ii,j,jj)
    for (i = 0; i <= nx - 2; i += 16) {
      int row_end = (i + 15 < nx - 2 ? i + 15 : nx - 2);
      for (ii = i; ii <= row_end; ii += 1) {
        for (j = 0; j <= ny - 2; j += 16) {
          int col_end = (ny - 2 < j + 15 ? ny - 2 : j + 15);
          for (jj = j; jj <= col_end; jj += 1)
            hz[ii][jj] = hz[ii][jj] - 0.69999999999999996 * (ex[ii][jj + 1] - ex[ii][jj] + ey[ii + 1][jj] - ey[ii][jj]);
        }
      }
    }
  }
}
Reductions.h
#ifndef _REDUCTIONS_H_
#define _REDUCTIONS_H_

#include <mpi.h>
#include <sys/time.h>
#include <iostream>
#include <iomanip>
#include <functional>
#include <algorithm>
#include <vector>
#include <string>
#include <sstream>

#include "CombBLAS/CombBLAS.h"
#include "Glue.h"
#include "CCGrid.h"

namespace combblas {

/***************************************************************************
 * Distribute a local m/sqrt(p) x n/sqrt(p) matrix (represented by a list of tuples) across layers
 * so that a each processor along the third dimension receives m/sqrt(p) x n/(c*sqrt(p)) submatrices.
 * After receiving c submatrices, they are merged to create one m/sqrt(p) x n/(c*sqrt(p)) matrix.
 * Assumption: input tuples are deleted
 * Inputs:
 *      fibWorld: Communicator along the third dimension
 *      localmerged: input array of tuples, which will be distributed across layers
 * Output: output array of tuples, after distributing across layers and merging locally in the received processor
 *
 ***************************************************************************/
template <typename SR, typename IT, typename NT>
SpTuples<IT,NT> * ParallelReduce_Alltoall_threaded(MPI_Comm & fibWorld, SpTuples<IT,NT> * & localmerged)
{
	double comp_begin, comm_begin, comp_time=0, comm_time=0;
	int fprocs, fibrank;
	MPI_Comm_size(fibWorld,&fprocs);
	MPI_Comm_rank(fibWorld,&fibrank);
	IT mdim = localmerged->getnrow();
	IT ndim = localmerged->getncol();
	// Single process along the fiber: nothing to exchange, hand back the
	// input (ownership passes to the caller unchanged).
	if(fprocs == 1)
	{
		return localmerged;
	}

	// ------------ find splitters to distributed across layers -----------
	comp_begin = MPI_Wtime();
	std::vector<int> send_sizes(fprocs);
	std::vector<int> recv_sizes(fprocs);
	std::vector<int> recv_offsets(fprocs);
	std::vector<int> send_offsets = findColSplitters<int>(localmerged, fprocs);
	for(int i=0; i<fprocs; i++)
	{
		send_sizes[i] = send_offsets[i+1] - send_offsets[i];
	}
	comp_time += (MPI_Wtime() - comp_begin);

	// ------------ Communicate counts -----------
	comm_begin = MPI_Wtime();
	MPI_Alltoall( send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1, MPI_INT,fibWorld);
	comm_time += (MPI_Wtime() - comm_begin);

	// Ship each (row,col,val) tuple as raw bytes.
	MPI_Datatype MPI_triple;
	MPI_Type_contiguous(sizeof(std::tuple<IT,IT,NT>), MPI_CHAR, &MPI_triple);
	MPI_Type_commit(&MPI_triple);

	// ------------ Allocate memory to receive data -----------
	comp_begin = MPI_Wtime();
	int recv_count = 0;
	for( int i = 0; i < fprocs; i++ )
	{
		recv_count += recv_sizes[i];
	}
	// Raw storage (no constructors run): tuples arrive fully formed over MPI.
	std::tuple<IT,IT,NT> * recvbuf = static_cast<std::tuple<IT, IT, NT>*> (::operator new (sizeof(std::tuple<IT, IT, NT>[recv_count])));
	recv_offsets[0] = 0;
	for( int i = 1; i < fprocs; i++ )
	{
		recv_offsets[i] = recv_offsets[i-1]+recv_sizes[i-1];
	}
	comp_time += (MPI_Wtime() - comp_begin);

	// ------------ Communicate split tuples -----------
	comm_begin = MPI_Wtime();
	MPI_Alltoallv( localmerged->tuples, send_sizes.data(), send_offsets.data(), MPI_triple, recvbuf, recv_sizes.data(), recv_offsets.data(), MPI_triple, fibWorld); // WARNING: is this big enough?
	comm_time += (MPI_Wtime() - comm_begin);

	// -------- update column indices of split tuples ----------
	comp_begin = MPI_Wtime();
	// This rank owns the column strip [coloffset, coloffset+ndimSplit);
	// the last rank absorbs the remainder columns.
	IT ndimSplit = ndim/fprocs;
	if(fibrank==(fprocs-1))
		ndimSplit = ndim - ndimSplit * fibrank;
	IT coloffset = fibrank * ndimSplit;
	#pragma omp parallel for
	for(int k=0; k<recv_count; k++)
	{
		std::get<1>(recvbuf[k]) = std::get<1>(recvbuf[k]) - coloffset;
	}

	// -------- create vector of SpTuples for MultiwayMerge ----------
	std::vector< SpTuples<IT,NT>* > lists;
	for(int i=0; i< fprocs; ++i)
	{
		SpTuples<IT, NT>* spTuples = new SpTuples<IT, NT> (recv_sizes[i], mdim, ndimSplit, &recvbuf[recv_offsets[i]], true); // If needed pass an empty object of proper dimension
		lists.push_back(spTuples);
	}

	// -------- merge received tuples ----------
	SpTuples<IT,NT> * globalmerged = MultiwayMerge<SR>(lists, mdim, ndimSplit, false);
	comp_time += (MPI_Wtime() - comp_begin);
	comp_reduce_layer += comp_time;
	comm_reduce += comm_time;
	::operator delete(recvbuf);
	delete localmerged; // not sure if we can call ::operator delete here

	return globalmerged;
}

// Merge the per-stage SUMMA partial results, then reduce across the third
// process dimension and repackage the merged tuples as a SpDCCols matrix.
// Assumption: unreducedC entries are consumed (deleted) by the merge.
template <typename NT, typename IT>
SpDCCols<IT,NT> * ReduceAll_threaded(std::vector< SpTuples<IT,NT>* > & unreducedC, CCGrid & CMG)
{
	typedef PlusTimesSRing<double, double> PTDD;
	IT mdim = unreducedC[0]->getnrow();
	IT ndim = unreducedC[0]->getncol();

	// ------ merge list of tuples from n/sqrt(p) stages of SUMMA -------
	double loc_beg1 = MPI_Wtime();
	//SpTuples<IT, NT>* localmerged = multiwayMerge(unreducedC, true);
	SpTuples<IT, NT>* localmerged = MultiwayMerge<PTDD>(unreducedC, mdim, ndim, true);
	comp_reduce += (MPI_Wtime() - loc_beg1);

	// scatter local tuples across layers
	SpTuples<IT,NT> * mergedSpTuples = ParallelReduce_Alltoall_threaded<PTDD>(CMG.fiberWorld, localmerged);
	loc_beg1 = MPI_Wtime();
	// TODO: this is not a good constructor. Change it back to SpTuple-based constructor
	SpDCCols<IT,NT> * reducedC = new SpDCCols<IT,NT>(mergedSpTuples->getnrow(), mergedSpTuples->getncol(), mergedSpTuples->getnnz(), mergedSpTuples->tuples, false);
	comp_result += (MPI_Wtime() - loc_beg1);
	delete mergedSpTuples; // too expensive

	return reducedC;
}

}

#endif
irbuilder_unroll_partial_heuristic.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: 
[[OMP_LOOP_HEADER]]: // CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_COND]]: // CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[DOTCOUNT]] // CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP4]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP7]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]] // CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP10]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]] // CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP13]] 
to i64
// CHECK-NEXT:    %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]]
// CHECK-NEXT:    store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT:    br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT:  [[OMP_LOOP_INC]]:
// CHECK-NEXT:    %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT:    br label %[[OMP_LOOP_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT:  [[OMP_LOOP_EXIT]]:
// CHECK-NEXT:    br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT:  [[OMP_LOOP_AFTER]]:
// CHECK-NEXT:    ret void
// CHECK-NEXT:  }

// Exercises '#pragma omp unroll partial' with no factor clause: the unroll
// factor is chosen by the implementation's heuristic, so the frontend emits a
// canonical loop (checked above) tagged with llvm.loop.unroll.enable metadata
// (checked at the bottom of this file) for the middle-end to unroll.
void unroll_partial_heuristic(float *a, float *b, float *c, float *d) {
#pragma omp unroll partial
  for (int i = 0; i < 2; i++) {
    a[i] = b[i] * c[i] * d[i];
  }
}

#endif // HEADER

// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT:  [[ENTRY:.*]]:
// CHECK-NEXT:    %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT:    %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT:    %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT:    %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT:    %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT:    store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT:    store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT:    %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT:    %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT:    %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT:    %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT:    store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT:    store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT:    store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT:    %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT:    %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT:    %[[CMP:.+]] = icmp slt i32
%[[TMP4]], %[[TMP5]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP8]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP9:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP9]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* 
%[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
GB_binop__land_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_uint64) // A.*B function (eWiseMult): GB (_AemultB_01__land_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__land_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__land_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint64) // A*D function (colscale): GB (_AxD__land_uint64) // D*A function (rowscale): GB (_DxB__land_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__land_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__land_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint64) // C=scalar+B GB (_bind1st__land_uint64) // C=scalar+B' GB (_bind1st_tran__land_uint64) // C=A+scalar GB (_bind2nd__land_uint64) // C=A'+scalar GB (_bind2nd_tran__land_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true 
if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_UINT64 || GxB_NO_LAND_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// Dense C += A+B kernel: dead code for LAND — the "#if 0" opened just above
// this function compiles it out, because the op is not one of
// MIN/MAX/PLUS/MINUS/RMINUS/TIMES/DIV/RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Every kernel below returns GrB_NO_VALUE when compiled out via GB_DISABLE,
// which tells the caller to fall back to the generic (non-specialized) path.
// The numeric work is done by shared template files, specialized through the
// GB_* macros defined at the top of this file.
GrB_Info GB (_Cdense_ewise3_noaccum__land_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,    // task slicing of the entries of B
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;      // unreachable; kept as generated
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,      // true: only the pattern of A is used
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,     // mask is structural (values ignored)
    const bool Mask_comp,       // mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here so GB_FREE_WORK can release it on all paths
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__land_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,      // if true compute z=f(y,x) instead of z=f(x,y)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__land_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,    // task slicing of the entries of M
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in the bitmap of B
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap of A
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ((x != 0) && (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__land_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same definition here, since A and B types match)
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ((aij != 0) && (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__land_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif      // closes the "#ifndef GBCOMPACT" at the top of the file
paddle_tensor_impl.h
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Implementation of the PaddleTensor<T> adapter: elementwise arithmetic,
// bitwise, and matmul operations backed by paddle::framework::Tensor / Eigen.

#pragma once

#include <cmath>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/hostdevice.h"
#include "unsupported/Eigen/CXX11/Tensor"

#include "./type_utils.h"

namespace common {

// 128-bit unsigned integer (GCC/Clang extension)
using u128 = unsigned __int128;

// Reallocate the underlying tensor storage for the given shape.
template <typename T>
void PaddleTensor<T>::reshape(const std::vector<size_t> &shape) {
  std::vector<int64_t> shape_(shape.cbegin(), shape.cend());
  paddle::framework::DDim dim(shape_.data(), shape_.size());
  // 0 for default size
  _tensor.mutable_data<T>(dim, place(), 0);
}

// ret = this + rhs, elementwise via Eigen; dims must match exactly.
template <typename T>
void PaddleTensor<T>::add(const TensorAdapter<T> *rhs,
                          TensorAdapter<T> *ret) const {
  auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs);
  auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret);
  PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(),
                    "Input dims should be equal.");
  auto eigen_x = paddle::framework::EigenVector<T>::Flatten(_tensor);
  auto eigen_y = paddle::framework::EigenVector<T>::Flatten(rhs_->_tensor);
  auto eigen_z = paddle::framework::EigenVector<T>::Flatten(ret_->_tensor);
  auto &place = *eigen_device();
  eigen_z.device(place) = eigen_x + eigen_y;
}

// ret = sum of all elements; ret must be a single-element (0-dim) tensor.
template <typename T>
void PaddleTensor<T>::sum(TensorAdapter<T> *ret) const {
  auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret);
  PADDLE_ENFORCE_EQ(1, ret_->_tensor.numel(), "Result numel should be one.");
  auto eigen_x = paddle::framework::EigenVector<T>::Flatten(_tensor);
  auto dims = paddle::framework::make_ddim({});
  auto eigen_z = paddle::framework::EigenTensor<T, 0>::From(ret_->_tensor, dims);
  auto &place = *eigen_device();
  eigen_z.device(place) = eigen_x.sum();
}

// ret = this - rhs, elementwise via Eigen; dims must match exactly.
template <typename T>
void PaddleTensor<T>::sub(const TensorAdapter<T> *rhs,
                          TensorAdapter<T> *ret) const {
  auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs);
  auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret);
  PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(),
                    "Input dims should be equal.");
  auto eigen_x = paddle::framework::EigenVector<T>::Flatten(_tensor);
  auto eigen_y = paddle::framework::EigenVector<T>::Flatten(rhs_->_tensor);
  auto eigen_z = paddle::framework::EigenVector<T>::Flatten(ret_->_tensor);
  auto &place = *eigen_device();
  eigen_z.device(place) = eigen_x - eigen_y;
}

// ret = this * rhs, elementwise (Hadamard) product via Eigen.
template <typename T>
void PaddleTensor<T>::mul(const TensorAdapter<T> *rhs,
                          TensorAdapter<T> *ret) const {
  auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs);
  auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret);
  PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(),
                    "Input dims should be equal.");
  auto eigen_x = paddle::framework::EigenVector<T>::Flatten(_tensor);
  auto eigen_y = paddle::framework::EigenVector<T>::Flatten(rhs_->_tensor);
  auto eigen_z = paddle::framework::EigenVector<T>::Flatten(ret_->_tensor);
  auto &place = *eigen_device();
  eigen_z.device(place) = eigen_x * eigen_y;
}

// ret = this / rhs, elementwise on the CPU via std::transform.
// NOTE(review): no guard against zero divisors; for integral T a zero in rhs
// is undefined behavior — callers must ensure rhs is nonzero everywhere.
template <typename T>
void PaddleTensor<T>::div(const TensorAdapter<T> *rhs,
                          TensorAdapter<T> *ret) const {
  auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs);
  PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(),
                    "Input dims should be equal.");
  auto div_ = [](T a, T b) -> T { return a / b; };
  std::transform(data(), data() + numel(), rhs->data(), ret->data(), div_);
}

// ret = matmul(this, rhs) with optional transposes; when sum_reduce_batch is
// true the per-batch products are summed into a single matrix.
template <typename T>
void PaddleTensor<T>::mat_mul(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret, bool transpose_lhs, bool transpose_rhs, bool
sum_reduce_batch) const {
  auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs);
  auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret);

  auto &mat_a = _tensor;
  auto &mat_b = rhs_->_tensor;
  auto &mat_out = ret_->_tensor;

  // tensor with dims like [ h, w ] or [ batch_size , h, w ] is matrix
  auto is_matrix = [](const paddle::framework::Tensor &t) -> bool {
    return t.dims().size() == 2 || t.dims().size() == 3;
  };

  PADDLE_ENFORCE(mat_a.place() == mat_b.place() &&
                     mat_a.place() == mat_out.place(),
                 "The places of matrices must be same");

  PADDLE_ENFORCE(is_matrix(mat_a) && is_matrix(mat_b) && is_matrix(mat_out),
                 "The input and output of matmul must be matrix "
                 "or batched matrix.");

  PADDLE_ENFORCE(mat_a.dims().size() >= mat_b.dims().size(),
                 "Only following dims are supported: "
                 "Mat A is [BatchSize, H, W] and Mat B is [BatchSize, H, W]."
                 "Mat A is [BatchSize, H, W] and Mat B is [H, W]."
                 "Mat A is [H, W] and Mat B is [H, W].");

  using EigenTensor = paddle::framework::EigenTensor<T, 3>;
  using EigenTensor4 = paddle::framework::EigenTensor<T, 4>;
  using EigenTensor2 = paddle::framework::EigenTensor<T, 2>;

  // view input as [batch, h, w]; a plain matrix becomes a batch of one
  auto to_const_eigen_tensor = [](const paddle::framework::Tensor &t) {
    auto dims = t.dims();
    if (dims.size() == 2) {
      dims = paddle::framework::make_ddim({1, dims[0], dims[1]});
    }
    return EigenTensor::From(t, dims);
  };

  // view output as [batch, h, 1, w] so per-batch 2-D chips can be addressed
  auto to_eigen_tensor = [](paddle::framework::Tensor &t) {
    auto dims = t.dims();
    if (dims.size() == 2) {
      dims = paddle::framework::make_ddim({1, dims[0], 1, dims[1]});
    } else { // dims.size() == 3
      dims = paddle::framework::make_ddim({dims[0], dims[1], 1, dims[2]});
    }
    return EigenTensor4::From(t, dims);
  };

  // view output as a single [h, w] matrix (batch-reduced result)
  auto to_eigen_tensor2 = [](paddle::framework::Tensor &t) {
    auto dims = t.dims();
    if (dims.size() == 2) {
      dims = paddle::framework::make_ddim({dims[0], dims[1]});
    } else { // dims.size() == 3
      PADDLE_ENFORCE(dims[0] == 1, "expected BatchSize = 1.");
      dims = paddle::framework::make_ddim({dims[1], dims[2]});
    }
    return EigenTensor2::From(t, dims);
  };

  auto &place = *eigen_device();
  auto t_a = to_const_eigen_tensor(mat_a);
  auto t_b = to_const_eigen_tensor(mat_b);
  auto t_c = to_eigen_tensor(mat_out);

  PADDLE_ENFORCE(t_a.dimension(2 - transpose_lhs) ==
                     t_b.dimension(1 + transpose_rhs),
                 "W_A != H_B.");

  auto batch_size = t_a.dimension(0);
  auto batch_size_b = t_b.dimension(0);
  PADDLE_ENFORCE(batch_size_b == batch_size || batch_size_b == 1,
                 "Mat B BatchSize mismatched.");
  // FIX: '==' binds tighter than '?:', so the original condition
  //   t_c.dimension(0) == sum_reduce_batch ? 1 : batch_size
  // parsed as (t_c.dimension(0) == sum_reduce_batch) ? 1 : batch_size, which
  // is non-zero for any positive batch_size — the check could never fire.
  // Parenthesizing makes the output batch size actually get validated.
  PADDLE_ENFORCE(t_c.dimension(0) == (sum_reduce_batch ? 1 : batch_size),
                 "Result Mat BatchSize mismatched.");

  auto hc = t_c.dimension(1);
  auto wc = t_c.dimension(3);

  // matrix product of tensor contractions
  // please refer to
  // github.com/eigenteam/eigen-git-mirror/blob/master/unsupported/Eigen/CXX11/src/Tensor/README.md
  if (sum_reduce_batch) {
    // contract over the batch axis as well, summing the per-batch products
    Eigen::array<Eigen::IndexPair<int>, 2> axis = {
        Eigen::IndexPair<int>(0, 0),
        Eigen::IndexPair<int>(2 - transpose_lhs, 1 + transpose_rhs)};
    auto t_c = to_eigen_tensor2(mat_out);
    t_c.device(place) = t_a.contract(t_b, axis);
  } else {
    Eigen::array<Eigen::IndexPair<int>, 1> axis = {
        Eigen::IndexPair<int>(1 - transpose_lhs, 0 + transpose_rhs)};
    // NOTE(review): an orphaned '#pragma omp for' only distributes iterations
    // when called from inside an enclosing parallel region; otherwise it runs
    // sequentially. Possibly 'parallel for' was intended — confirm before
    // changing, since Eigen's device may already use threads.
#pragma omp for
    for (int i = 0; i < batch_size; ++i) {
      Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor, Eigen::DenseIndex>>
          t_c_chip(t_c.data() + i * hc * wc, hc, wc);
      // broadcast a single-batch B against every batch of A
      int idx_t_b = batch_size_b == 1 ? 0 : i;
      t_c_chip.device(place) =
          t_a.chip(i, 0).contract(t_b.chip(idx_t_b, 0), axis);
    }
  }
}

// ret = -this, elementwise.
template <typename T>
void PaddleTensor<T>::negative(TensorAdapter<T> *ret) const {
  auto neg_ = [](T a) -> T { return -a; };
  std::transform(data(), data() + numel(), ret->data(), neg_);
}

// ret = this & rhs, elementwise; dims must match exactly.
template <typename T>
void PaddleTensor<T>::bitwise_and(const TensorAdapter<T> *rhs,
                                  TensorAdapter<T> *ret) const {
  auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs);
  PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(),
                    "Input dims should be equal.");
  auto and_ = [](T a, T b) -> T { return a & b; };
  std::transform(data(), data() + numel(), rhs->data(), ret->data(), and_);
}

// ret = this | rhs, elementwise; dims must match exactly.
template <typename T>
void PaddleTensor<T>::bitwise_or(const TensorAdapter<T> *rhs,
                                 TensorAdapter<T> *ret) const {
  auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs);
  PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(),
                    "Input dims should be equal.");
  auto or_ = [](T a, T b) -> T { return a | b; };
  std::transform(data(), data() + numel(), rhs->data(), ret->data(), or_);
}

// ret = ~this, elementwise.
template <typename T>
void PaddleTensor<T>::bitwise_not(TensorAdapter<T> *ret) const {
  auto not_ = [](T a) -> T { return ~a; };
  std::transform(data(), data() + numel(), ret->data(), not_);
}

// ret = this ^ rhs, elementwise; dims must match exactly.
template <typename T>
void PaddleTensor<T>::bitwise_xor(const TensorAdapter<T> *rhs,
                                  TensorAdapter<T> *ret) const {
  auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs);
  PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(),
                    "Input dims should be equal.");
  auto xor_ = [](T a, T b) -> T { return a ^ b; };
  std::transform(data(), data() + numel(), rhs->data(), ret->data(), xor_);
}

// ret = this << rhs, elementwise left shift by a scalar count.
template <typename T>
void PaddleTensor<T>::lshift(size_t rhs, TensorAdapter<T> *ret) const {
  auto lshift_functor = [rhs](T a) -> T { return a << rhs; };
  std::transform(data(), data() + numel(), ret->data(), lshift_functor);
}

// ret = this >> rhs, elementwise right shift by a scalar count.
template <typename T>
void PaddleTensor<T>::rshift(size_t rhs, TensorAdapter<T> *ret) const {
  auto rshift_functor = [rhs](T a) -> T { return a >> rhs;
}; std::transform(data(), data() + numel(), ret->data(), rshift_functor); } template <typename T> void PaddleTensor<T>::logical_rshift(size_t rhs, TensorAdapter<T> *ret) const { auto logical_rshift_functor = [rhs](T a) -> T { const size_t word_len = sizeof(T) * 8; T mask = (T)1 << word_len - rhs - 1; mask |= mask - 1; mask = rhs >= word_len ? 0 : mask; return a >> rhs & mask; }; std::transform(data(), data() + numel(), ret->data(), logical_rshift_functor); } template <typename T> void PaddleTensor<T>::add128(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret, bool lhs_128, bool rhs_128) const { PADDLE_ENFORCE_EQ(numel() / (1 + lhs_128), rhs->numel() / (1 + rhs_128), "Input numel should be equal."); using ConstType = Eigen::Tensor<const __int128, 1>; using Type = Eigen::Tensor<u128, 1>; size_t numel_ = ret->numel() / (sizeof(u128) / sizeof(T)); Type x(numel_); for (size_t i = 0; i < numel_; ++i) { x(i) = lhs_128 ? *(reinterpret_cast<const u128*>(data()) + i) : *(data() + i); } Type y(numel_); for (size_t i = 0; i < numel_; ++i) { y(i) = rhs_128 ? *(reinterpret_cast<const u128*>(rhs->data()) + i) : *(rhs->data() + i); } Eigen::TensorMap<Type> z(reinterpret_cast<u128*>(ret->data()), numel_); auto &place = *eigen_device(); z.device(place) = x + y; } template <typename T> void PaddleTensor<T>::sub128(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret, bool lhs_128, bool rhs_128) const { PADDLE_ENFORCE_EQ(numel() / (1 + lhs_128), rhs->numel() / (1 + rhs_128), "Input numel should be equal."); using ConstType = Eigen::Tensor<const u128, 1>; using Type = Eigen::Tensor<u128, 1>; size_t numel_ = ret->numel() / (sizeof(u128) / sizeof(T)); Type x(numel_); for (size_t i = 0; i < numel_; ++i) { x(i) = lhs_128 ? *(reinterpret_cast<const u128*>(data()) + i) : *(data() + i); } Type y(numel_); for (size_t i = 0; i < numel_; ++i) { y(i) = rhs_128 ? 
*(reinterpret_cast<const u128*>(rhs->data()) + i) : static_cast<typename unsigned_type<T>::value_type>(*(rhs->data() + i)); } Eigen::TensorMap<Type> z(reinterpret_cast<u128*>(ret->data()), numel_); auto &place = *eigen_device(); z.device(place) = x - y; } template <typename T> void PaddleTensor<T>::mul128_with_truncate(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret, bool lhs_128, bool rhs_128) const { PADDLE_ENFORCE_EQ(numel() / (1 + lhs_128), rhs->numel() / (1 + rhs_128), "Input numel should be equal."); using ConstType = Eigen::Tensor<const __int128, 1>; using Type = Eigen::Tensor<__int128, 1>; size_t numel_ = ret->numel(); Type x(numel_); for (size_t i = 0; i < numel_; ++i) { x(i) = lhs_128 ? *(reinterpret_cast<const u128*>(data()) + i) : *(data() + i); } Type y(numel_); for (size_t i = 0; i < numel_; ++i) { y(i) = rhs_128 ? *(reinterpret_cast<const u128*>(rhs->data()) + i) : static_cast<typename unsigned_type<T>::value_type>(*(rhs->data() + i)); } Eigen::TensorMap<Eigen::Tensor<T, 1>> z(ret->data(), numel_); Type xy = x * y; Eigen::Tensor<T, 1> xy_trunc(numel_); // truncate for (size_t i = 0; i < numel_; ++i) { __int128 tmp = xy(i); xy_trunc(i) = (T)(tmp >> _scaling_factor); } auto &place = *eigen_device(); z.device(place) = xy_trunc; } template <typename T> template <typename U> PaddleTensor<T> & PaddleTensor<T>::from_float_point_type(const paddle::framework::Tensor &tensor, size_t scaling_factor) { double scale = std::pow(2, scaling_factor); auto cast = [scale](U a) -> T { return a * scale; }; _tensor.mutable_data<T>(tensor.dims(), place(), 0); std::transform(tensor.template data<U>(), tensor.template data<U>() + tensor.numel(), _tensor.template data<T>(), cast); this->scaling_factor() = scaling_factor; return *this; } template <typename T> template <typename U> PaddleTensor<T> &PaddleTensor<T>::from_float_point_scalar( const U &scalar, const std::vector<size_t> &shape, size_t scaling_factor) { double scale = std::pow(2, scaling_factor); auto trans = 
[scale, scalar](T) -> T { return scalar * scale; }; reshape(shape); std::transform(_tensor.template data<T>(), _tensor.template data<T>() + _tensor.numel(), _tensor.template data<T>(), trans); this->scaling_factor() = scaling_factor; return *this; } template <typename T> void PaddleTensor<T>::slice(size_t begin_idx, size_t end_idx, TensorAdapter<T> *ret) const { auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret); ret_->_tensor = _tensor.Slice(begin_idx, end_idx); ret->scaling_factor() = scaling_factor(); } template<typename T> std::shared_ptr<TensorAdapter<T>> PaddleTensor<T>::operator[](size_t index) { PADDLE_ENFORCE_GT(this->shape().size(), 1, "lhs's shape must great than 1."); auto slice_shape = this->shape(); slice_shape.erase(slice_shape.begin()); std::shared_ptr<PaddleTensor<T>> ret = std::make_shared<PaddleTensor<T>>(_device_ctx); ret->reshape(slice_shape); this->slice(index, index + 1, ret.get()); ret->reshape(slice_shape); return ret; } template<typename T> const std::shared_ptr<TensorAdapter<T>> PaddleTensor<T>::operator[](size_t index) const { return const_cast<PaddleTensor*>(this)->operator[](index); } } // namespace common
// ==== begin file: convolution_pack8_int8.h ====
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// int8 convolution for pack8 layout: for every output pixel, accumulate
// kernel_w * kernel_h taps over all input channel packs into two int32x4
// accumulators (8 packed output channels), then store raw int32 sums.
static void convolution_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat element offset of each kernel tap within the
    // input plane, honouring horizontal and vertical dilation
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // num_output: one iteration per packed output channel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // 8 int32 partial sums for the 8 output channels of this pack
                int32x4_t _sum0 = vdupq_n_s32(0);
                int32x4_t _sum1 = vdupq_n_s32(0);

                const signed char* kptr = weight_data_int8.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const signed char* sptr = m.row<signed char>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        // broadcast each of the 8 input channels of this tap
                        int8x8_t _val0 = vld1_dup_s8(sptr + space_ofs[k] * 8);
                        int8x8_t _val1 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 1);
                        int8x8_t _val2 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 2);
                        int8x8_t _val3 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 3);
                        int8x8_t _val4 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 4);
                        int8x8_t _val5 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 5);
                        int8x8_t _val6 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 6);
                        int8x8_t _val7 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 7);

                        // 8x8 int8 weight block for this tap (one row per
                        // input channel)
                        int8x8_t _w0 = vld1_s8(kptr);
                        int8x8_t _w1 = vld1_s8(kptr + 8);
                        int8x8_t _w2 = vld1_s8(kptr + 16);
                        int8x8_t _w3 = vld1_s8(kptr + 24);
                        int8x8_t _w4 = vld1_s8(kptr + 32);
                        int8x8_t _w5 = vld1_s8(kptr + 40);
                        int8x8_t _w6 = vld1_s8(kptr + 48);
                        int8x8_t _w7 = vld1_s8(kptr + 56);

                        // widening int8 multiply-accumulate into int16,
                        // pairing two input channels per accumulator
                        int16x8_t _wv0 = vmull_s8(_w0, _val0);
                        _wv0 = vmlal_s8(_wv0, _w1, _val1);
                        int16x8_t _wv2 = vmull_s8(_w2, _val2);
                        _wv2 = vmlal_s8(_wv2, _w3, _val3);
                        int16x8_t _wv4 = vmull_s8(_w4, _val4);
                        _wv4 = vmlal_s8(_wv4, _w5, _val5);
                        int16x8_t _wv6 = vmull_s8(_w6, _val6);
                        _wv6 = vmlal_s8(_wv6, _w7, _val7);

                        // widen the int16 partials to int32 and accumulate
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_wv0));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_wv0));
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_wv2));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_wv2));
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_wv4));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_wv4));
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_wv6));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_wv6));

                        kptr += 64;
                    }
                }

                vst1q_s32(outptr + j * 8, _sum0);
                vst1q_s32(outptr + j * 8 + 4, _sum1);
            }

            outptr += outw * 8;
        }
    }
}
// ==== begin file: for.c ====
#include <omp.h> #define N 10 int main (int argc, char * argv[]){ double a[N]; int i; #pragma omp parallel #pragma omp for for(i=0; i<N; i++) a[i] = 0; }