/* source: npb_mg.c, preprocessed with PGI 17.10 */
# 1 "main.c"
# 0 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header>
# 21 "/opt/pgi/linux86-64/17.10/include/_c_macros.h" <System_Header>
# 1 "main.c"
# 4 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 27 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
typedef int omp_lock_t ;
# 29 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
struct omp_nest_lock {
omp_lock_t act ;
short cnt ;
short tid ;
} ;
# 37 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
typedef struct omp_nest_lock omp_nest_lock_t ;
# 41 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
typedef enum omp_sched_t {
omp_sched_static = 1 ,
omp_sched_dynamic = 2 ,
omp_sched_guided = 3 ,
omp_sched_auto = 4
} omp_sched_t ;
# 52 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
extern void omp_set_num_threads ( int n ) ;
extern int omp_get_thread_num ( void ) ;
extern int omp_get_num_procs ( void ) ;
extern int omp_get_num_threads ( void ) ;
extern int omp_get_max_threads ( void ) ;
extern int omp_in_parallel ( void ) ;
extern int omp_in_final ( void ) ;
extern void omp_set_dynamic ( int n ) ;
extern int omp_get_dynamic ( void ) ;
extern void omp_set_nested ( int n ) ;
extern int omp_get_nested ( void ) ;
extern void omp_init_lock ( omp_lock_t * s ) ;
extern void omp_destroy_lock ( omp_lock_t * s ) ;
extern void omp_set_lock ( omp_lock_t * s ) ;
extern void omp_unset_lock ( omp_lock_t * s ) ;
extern int omp_test_lock ( omp_lock_t * s ) ;
extern void omp_init_nest_lock ( omp_nest_lock_t * s ) ;
extern void omp_destroy_nest_lock ( omp_nest_lock_t * s ) ;
extern void omp_set_nest_lock ( omp_nest_lock_t * s ) ;
extern void omp_unset_nest_lock ( omp_nest_lock_t * s ) ;
extern int omp_test_nest_lock ( omp_nest_lock_t * s ) ;
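/*
 * Hedged usage sketch of the omp_lock_t API declared above: the usual
 * init/set/unset/destroy life cycle. The example_* names are illustrative
 * only and do not appear in the original source.
 */
static omp_lock_t example_lock ;
static int example_counter ;

static void example_locked_increment ( void )
{
    omp_set_lock ( & example_lock ) ;    /* blocks until the lock is acquired */
    example_counter ++ ;                 /* critical section */
    omp_unset_lock ( & example_lock ) ;  /* release for other threads */
}
/* Call omp_init_lock(&example_lock) once before first use and
 * omp_destroy_lock(&example_lock) after the last use. */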
extern double omp_get_wtime ( void ) ;
extern double omp_get_wtick ( void ) ;
extern long omp_get_stack_size ( void ) ;
extern void omp_set_stack_size ( long l ) ;
extern int omp_get_thread_limit ( void ) ;
extern void omp_set_max_active_levels ( int ) ;
extern int omp_get_max_active_levels ( void ) ;
extern int omp_get_level ( void ) ;
extern int omp_get_ancestor_thread_num ( int ) ;
extern int omp_get_team_size ( int ) ;
extern int omp_get_active_level ( void ) ;
extern void omp_set_schedule ( omp_sched_t , int ) ;
extern void omp_get_schedule ( omp_sched_t * , int * ) ;
extern int omp_get_initial_device ( ) ;
extern int omp_get_default_device ( ) ;
extern void omp_set_default_device ( int ) ;
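/*
 * Hedged sketch over the runtime entry points declared above: select a
 * dynamic loop schedule, then time a region with omp_get_wtime(). The
 * example_* names and the chunk size are illustrative assumptions.
 */
static double example_timed_region ( void ( * work ) ( void ) )
{
    double t0 , t1 ;
    omp_set_schedule ( omp_sched_dynamic , 4 ) ; /* chunk size 4 */
    t0 = omp_get_wtime ( ) ;
    work ( ) ;                                   /* e.g. a parallel loop */
    t1 = omp_get_wtime ( ) ;
    return t1 - t0 ;                             /* elapsed wall-clock seconds */
}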
# 89 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 1 "/usr/include/stdlib.h" <System_Header>
# 16 "/usr/include/stdlib.h" <System_Header>
# 20 "/usr/include/stdlib.h" <System_Header>
# 24 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 93 "/usr/include/features.h" <System_Header>
# 96 "/usr/include/features.h" <System_Header>
# 123 "/usr/include/features.h" <System_Header>
# 134 "/usr/include/features.h" <System_Header>
# 145 "/usr/include/features.h" <System_Header>
# 156 "/usr/include/features.h" <System_Header>
# 181 "/usr/include/features.h" <System_Header>
# 191 "/usr/include/features.h" <System_Header>
# 197 "/usr/include/features.h" <System_Header>
# 203 "/usr/include/features.h" <System_Header>
# 212 "/usr/include/features.h" <System_Header>
# 220 "/usr/include/features.h" <System_Header>
# 344 "/usr/include/features.h" <System_Header>
# 345 "/usr/include/features.h" <System_Header>
# 1 "/usr/include/stdc-predef.h" <System_Header>
# 16 "/usr/include/stdc-predef.h" <System_Header>
# 27 "/usr/include/stdc-predef.h" <System_Header>
# 34 "/usr/include/stdc-predef.h" <System_Header>
# 54 "/usr/include/stdc-predef.h" <System_Header>
# 57 "/usr/include/stdc-predef.h" <System_Header>
# 346 "/usr/include/features.h" <System_Header>
# 352 "/usr/include/features.h" <System_Header>
# 357 "/usr/include/features.h" <System_Header>
# 364 "/usr/include/features.h" <System_Header>
# 367 "/usr/include/features.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 81 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 86 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 91 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 96 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 110 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 121 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 131 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 147 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 173 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 202 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 209 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 217 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 227 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 234 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 243 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 252 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 264 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 274 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 283 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 291 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 305 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 313 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 328 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 347 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 356 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 361 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 368 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 410 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 411 "/usr/include/x86_64-linux-gnu/sys/cdefs.h" <System_Header>
# 15 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 18 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 45 "/opt/pgi/linux86-64/17.10/include-gcc50/sys/cdefs.h" <System_Header>
# 368 "/usr/include/features.h" <System_Header>
# 371 "/usr/include/features.h" <System_Header>
# 379 "/usr/include/features.h" <System_Header>
# 390 "/usr/include/features.h" <System_Header>
# 391 "/usr/include/features.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header>
# 3 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header>
# 10 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" <System_Header>
# 4 "/usr/include/x86_64-linux-gnu/gnu/stubs-64.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/gnu/stubs.h" <System_Header>
# 392 "/usr/include/features.h" <System_Header>
# 25 "/usr/include/stdlib.h" <System_Header>
# 26 "/usr/include/stdlib.h" <System_Header>
# 32 "/usr/include/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 216 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
typedef unsigned long int size_t ;
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 292 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 312 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 328 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
typedef int wchar_t ;
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 33 "/usr/include/stdlib.h" <System_Header>
# 40 "/usr/include/stdlib.h" <System_Header>
# 41 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 45 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/waitflags.h" <System_Header>
typedef enum
{
P_ALL ,
P_PID ,
P_PGID
} idtype_t ;
# 42 "/usr/include/stdlib.h" <System_Header>
# 42 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 36 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 47 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 52 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 55 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 64 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 1 "/usr/include/endian.h" <System_Header>
# 16 "/usr/include/endian.h" <System_Header>
# 21 "/usr/include/endian.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 22 "/usr/include/endian.h" <System_Header>
# 29 "/usr/include/endian.h" <System_Header>
# 35 "/usr/include/endian.h" <System_Header>
# 36 "/usr/include/endian.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/endian.h" <System_Header>
# 37 "/usr/include/endian.h" <System_Header>
# 39 "/usr/include/endian.h" <System_Header>
# 59 "/usr/include/endian.h" <System_Header>
# 60 "/usr/include/endian.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned char __u_char ;
typedef unsigned short int __u_short ;
typedef unsigned int __u_int ;
typedef unsigned long int __u_long ;
# 35 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef signed char __int8_t ;
typedef unsigned char __uint8_t ;
typedef signed short int __int16_t ;
typedef unsigned short int __uint16_t ;
typedef signed int __int32_t ;
typedef unsigned int __uint32_t ;
# 43 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef signed long int __int64_t ;
typedef unsigned long int __uint64_t ;
# 50 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 52 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __quad_t ;
typedef unsigned long int __u_quad_t ;
# 87 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 116 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 121 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 79 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 82 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 86 "/usr/include/x86_64-linux-gnu/bits/typesizes.h" <System_Header>
# 122 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 124 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned long int __dev_t ;
typedef unsigned int __uid_t ;
typedef unsigned int __gid_t ;
typedef unsigned long int __ino_t ;
typedef unsigned long int __ino64_t ;
typedef unsigned int __mode_t ;
typedef unsigned long int __nlink_t ;
typedef long int __off_t ;
typedef long int __off64_t ;
typedef int __pid_t ;
typedef struct { int __val [ 2 ] ; } __fsid_t ;
typedef long int __clock_t ;
typedef unsigned long int __rlim_t ;
typedef unsigned long int __rlim64_t ;
typedef unsigned int __id_t ;
typedef long int __time_t ;
typedef unsigned int __useconds_t ;
typedef long int __suseconds_t ;
# 143 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef int __daddr_t ;
typedef int __key_t ;
# 146 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef int __clockid_t ;
# 149 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef void * __timer_t ;
# 152 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __blksize_t ;
# 155 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 157 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __blkcnt_t ;
typedef long int __blkcnt64_t ;
# 161 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned long int __fsblkcnt_t ;
typedef unsigned long int __fsblkcnt64_t ;
# 165 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned long int __fsfilcnt_t ;
typedef unsigned long int __fsfilcnt64_t ;
# 169 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __fsword_t ;
# 172 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __ssize_t ;
# 174 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __syscall_slong_t ;
typedef unsigned long int __syscall_ulong_t ;
# 180 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef __off64_t __loff_t ;
typedef __quad_t * __qaddr_t ;
typedef char * __caddr_t ;
# 185 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef long int __intptr_t ;
# 188 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
typedef unsigned int __socklen_t ;
# 28 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 34 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 35 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header>
# 44 "/usr/include/x86_64-linux-gnu/bits/byteswap-16.h" <System_Header>
static unsigned short int
__bswap_16 ( unsigned short int __bsx )
{
return ( ( unsigned short int ) ( ( ( ( __bsx ) >> 8 ) & 0xff ) | ( ( ( __bsx ) & 0xff ) << 8 ) ) ) ;
}
# 36 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 37 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
# 87 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
static unsigned int
__bswap_32 ( unsigned int __bsx )
{
return ( ( ( ( __bsx ) & 0xff000000 ) >> 24 ) | ( ( ( __bsx ) & 0x00ff0000 ) >> 8 ) | ( ( ( __bsx ) & 0x0000ff00 ) << 8 ) | ( ( ( __bsx ) & 0x000000ff ) << 24 ) ) ;
}
# 148 "/usr/include/x86_64-linux-gnu/bits/byteswap.h" <System_Header>
static __uint64_t
__bswap_64 ( __uint64_t __bsx )
{
return ( ( ( ( __bsx ) & 0xff00000000000000ull ) >> 56 ) | ( ( ( __bsx ) & 0x00ff000000000000ull ) >> 40 ) | ( ( ( __bsx ) & 0x0000ff0000000000ull ) >> 24 ) | ( ( ( __bsx ) & 0x000000ff00000000ull ) >> 8 ) | ( ( ( __bsx ) & 0x00000000ff000000ull ) << 8 ) | ( ( ( __bsx ) & 0x0000000000ff0000ull ) << 24 ) | ( ( ( __bsx ) & 0x000000000000ff00ull ) << 40 ) | ( ( ( __bsx ) & 0x00000000000000ffull ) << 56 ) ) ;
}
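/*
 * Hedged sketch: the static __bswap_* helpers above reverse byte order.
 * On the little-endian x86-64 target implied by these headers, converting
 * a host value to big-endian (network) order is a single swap. The
 * function name is an illustrative assumption.
 */
static __uint64_t example_host_to_be64 ( __uint64_t host_value )
{
    /* 0x0102030405060708 -> 0x0807060504030201 : bytes reversed */
    return __bswap_64 ( host_value ) ;
}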
# 61 "/usr/include/endian.h" <System_Header>
# 65 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
# 66 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
union wait
{
int w_status ;
struct
{
# 72 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
unsigned int __w_termsig : 7 ;
unsigned int __w_coredump : 1 ;
unsigned int __w_retcode : 8 ;
unsigned int : 16 ;
# 83 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
} __wait_terminated ;
struct
{
# 87 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
unsigned int __w_stopval : 8 ;
unsigned int __w_stopsig : 8 ;
unsigned int : 16 ;
# 96 "/usr/include/x86_64-linux-gnu/bits/waitstatus.h" <System_Header>
} __wait_stopped ;
} ;
# 43 "/usr/include/stdlib.h" <System_Header>
# 47 "/usr/include/stdlib.h" <System_Header>
# 60 "/usr/include/stdlib.h" <System_Header>
# 83 "/usr/include/stdlib.h" <System_Header>
# 96 "/usr/include/stdlib.h" <System_Header>
typedef struct
{
int quot ;
int rem ;
} div_t ;
# 103 "/usr/include/stdlib.h" <System_Header>
# 105 "/usr/include/stdlib.h" <System_Header>
typedef struct
{
long int quot ;
long int rem ;
} ldiv_t ;
# 116 "/usr/include/stdlib.h" <System_Header>
typedef struct
{
long long int quot ;
long long int rem ;
} lldiv_t ;
# 127 "/usr/include/stdlib.h" <System_Header>
# 132 "/usr/include/stdlib.h" <System_Header>
# 137 "/usr/include/stdlib.h" <System_Header>
# 139 "/usr/include/stdlib.h" <System_Header>
extern size_t __ctype_get_mb_cur_max ( void ) ;
# 143 "/usr/include/stdlib.h" <System_Header>
extern double atof ( const char * __nptr )
;
extern int atoi ( const char * __nptr )
;
extern long int atol ( const char * __nptr )
;
# 156 "/usr/include/stdlib.h" <System_Header>
extern long long int atoll ( const char * __nptr )
;
# 163 "/usr/include/stdlib.h" <System_Header>
extern double strtod ( const char * __restrict __nptr ,
char * * __restrict __endptr )
;
# 171 "/usr/include/stdlib.h" <System_Header>
extern float strtof ( const char * __restrict __nptr ,
char * * __restrict __endptr ) ;
# 175 "/usr/include/stdlib.h" <System_Header>
extern long double strtold ( const char * __restrict __nptr ,
char * * __restrict __endptr )
;
# 182 "/usr/include/stdlib.h" <System_Header>
extern long int strtol ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
extern unsigned long int strtoul ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 193 "/usr/include/stdlib.h" <System_Header>
# 195 "/usr/include/stdlib.h" <System_Header>
extern long long int strtoq ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 200 "/usr/include/stdlib.h" <System_Header>
extern unsigned long long int strtouq ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 207 "/usr/include/stdlib.h" <System_Header>
# 209 "/usr/include/stdlib.h" <System_Header>
extern long long int strtoll ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
# 214 "/usr/include/stdlib.h" <System_Header>
extern unsigned long long int strtoull ( const char * __restrict __nptr ,
char * * __restrict __endptr , int __base )
;
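/*
 * Hedged sketch: unlike atoi(), strtol() declared above reports where
 * parsing stopped, so malformed input can be detected. Overflow checking
 * via errno is omitted because <errno.h> is not part of this dump; the
 * example_* names are illustrative.
 */
static int example_parse_long ( const char * text , long int * out )
{
    char * end ;
    long int value = strtol ( text , & end , 10 ) ; /* base 10 */
    if ( end == text || * end != '\0' )
        return 0 ;  /* no digits consumed, or trailing junk */
    * out = value ;
    return 1 ;
}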
# 304 "/usr/include/stdlib.h" <System_Header>
extern char * l64a ( long int __n ) ;
# 307 "/usr/include/stdlib.h" <System_Header>
extern long int a64l ( const char * __s )
;
# 314 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 20 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 25 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __u_char u_char ;
typedef __u_short u_short ;
typedef __u_int u_int ;
typedef __u_long u_long ;
typedef __quad_t quad_t ;
typedef __u_quad_t u_quad_t ;
typedef __fsid_t fsid_t ;
# 44 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __loff_t loff_t ;
# 48 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __ino_t ino_t ;
# 60 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __dev_t dev_t ;
# 65 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __gid_t gid_t ;
# 70 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __mode_t mode_t ;
# 75 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __nlink_t nlink_t ;
# 80 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __uid_t uid_t ;
# 86 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __off_t off_t ;
# 98 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __pid_t pid_t ;
# 104 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __id_t id_t ;
# 109 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __ssize_t ssize_t ;
# 115 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __daddr_t daddr_t ;
typedef __caddr_t caddr_t ;
# 122 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __key_t key_t ;
# 132 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/time.h" <System_Header>
# 16 "/usr/include/time.h" <System_Header>
# 20 "/usr/include/time.h" <System_Header>
# 55 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 56 "/usr/include/time.h" <System_Header>
# 58 "/usr/include/time.h" <System_Header>
typedef __clock_t clock_t ;
# 71 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 72 "/usr/include/time.h" <System_Header>
# 74 "/usr/include/time.h" <System_Header>
typedef __time_t time_t ;
# 88 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 89 "/usr/include/time.h" <System_Header>
# 90 "/usr/include/time.h" <System_Header>
typedef __clockid_t clockid_t ;
# 100 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 101 "/usr/include/time.h" <System_Header>
# 102 "/usr/include/time.h" <System_Header>
typedef __timer_t timer_t ;
# 133 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 146 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 147 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 149 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef unsigned long int ulong ;
typedef unsigned short int ushort ;
typedef unsigned int uint ;
# 155 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 159 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 162 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef char int8_t ;
typedef short int int16_t ;
typedef int int32_t ;
# 166 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef long int int64_t ;
# 172 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef unsigned char u_int8_t ;
typedef unsigned short int u_int16_t ;
typedef unsigned int u_int32_t ;
# 177 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef unsigned long int u_int64_t ;
# 182 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef int register_t ;
# 215 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 216 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/endian.h" <System_Header>
# 16 "/usr/include/endian.h" <System_Header>
# 217 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 218 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 219 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 19 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 25 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 47 "/usr/include/x86_64-linux-gnu/bits/select.h" <System_Header>
# 31 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 32 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
typedef int __sig_atomic_t ;
# 24 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
typedef struct
{
unsigned long int __val [ ( 1024 / ( 8 * sizeof ( unsigned long int ) ) ) ] ;
} __sigset_t ;
# 39 "/usr/include/x86_64-linux-gnu/bits/sigset.h" <System_Header>
# 34 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 37 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef __sigset_t sigset_t ;
# 40 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/time.h" <System_Header>
# 16 "/usr/include/time.h" <System_Header>
# 20 "/usr/include/time.h" <System_Header>
# 116 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 117 "/usr/include/time.h" <System_Header>
# 119 "/usr/include/time.h" <System_Header>
struct timespec
{
__time_t tv_sec ;
__syscall_slong_t tv_nsec ;
} ;
# 44 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 45 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
struct timeval
{
__time_t tv_sec ;
__suseconds_t tv_usec ;
} ;
# 46 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 48 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef __suseconds_t suseconds_t ;
# 53 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef long int __fd_mask ;
# 56 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 58 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 63 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef struct
{
# 72 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
__fd_mask __fds_bits [ 1024 / ( 8 * ( int ) sizeof ( __fd_mask ) ) ] ;
# 75 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
} fd_set ;
# 77 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 81 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
typedef __fd_mask fd_mask ;
# 84 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 89 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 105 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
extern int select ( int __nfds , fd_set * __restrict __readfds ,
fd_set * __restrict __writefds ,
fd_set * __restrict __exceptfds ,
struct timeval * __restrict __timeout ) ;
# 117 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
extern int pselect ( int __nfds , fd_set * __restrict __readfds ,
fd_set * __restrict __writefds ,
fd_set * __restrict __exceptfds ,
const struct timespec * __restrict __timeout ,
const __sigset_t * __restrict __sigmask ) ;
# 126 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 220 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 221 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 222 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
extern unsigned int gnu_dev_major ( unsigned long long int __dev )
;
# 30 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
extern unsigned int gnu_dev_minor ( unsigned long long int __dev )
;
# 33 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
extern unsigned long long int gnu_dev_makedev ( unsigned int __major ,
unsigned int __minor )
;
# 60 "/usr/include/x86_64-linux-gnu/sys/sysmacros.h" <System_Header>
# 223 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 228 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __blksize_t blksize_t ;
# 232 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 235 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __blkcnt_t blkcnt_t ;
# 239 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __fsblkcnt_t fsblkcnt_t ;
# 243 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
typedef __fsfilcnt_t fsfilcnt_t ;
# 268 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 270 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 59 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef unsigned long int pthread_t ;
# 63 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
union pthread_attr_t
{
char __size [ 56 ] ;
long int __align ;
} ;
# 69 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union pthread_attr_t pthread_attr_t ;
# 75 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef struct __pthread_internal_list
{
struct __pthread_internal_list * __prev ;
struct __pthread_internal_list * __next ;
} __pthread_list_t ;
# 89 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
struct __pthread_mutex_s
{
int __lock ;
unsigned int __count ;
int __owner ;
# 98 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
unsigned int __nusers ;
# 100 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
int __kind ;
# 104 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
short __spins ;
short __elision ;
__pthread_list_t __list ;
# 108 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
# 125 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
} __data ;
char __size [ 40 ] ;
long int __align ;
} pthread_mutex_t ;
# 130 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 4 ] ;
int __align ;
} pthread_mutexattr_t ;
# 138 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
struct
{
int __lock ;
unsigned int __futex ;
unsigned long long int __total_seq ;
unsigned long long int __wakeup_seq ;
unsigned long long int __woken_seq ;
void * __mutex ;
unsigned int __nwaiters ;
unsigned int __broadcast_seq ;
} __data ;
char __size [ 48 ] ;
long long int __align ;
} pthread_cond_t ;
# 156 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 4 ] ;
int __align ;
} pthread_condattr_t ;
# 163 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef unsigned int pthread_key_t ;
# 167 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef int pthread_once_t ;
# 173 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
# 177 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
struct
{
int __lock ;
unsigned int __nr_readers ;
unsigned int __readers_wakeup ;
unsigned int __writer_wakeup ;
unsigned int __nr_readers_queued ;
unsigned int __nr_writers_queued ;
int __writer ;
int __shared ;
signed char __rwelision ;
# 192 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
unsigned char __pad1 [ 7 ] ;
# 195 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
unsigned long int __pad2 ;
unsigned int __flags ;
# 200 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
} __data ;
# 220 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
char __size [ 56 ] ;
long int __align ;
} pthread_rwlock_t ;
# 224 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 8 ] ;
long int __align ;
} pthread_rwlockattr_t ;
# 233 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef volatile int pthread_spinlock_t ;
# 238 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 32 ] ;
long int __align ;
} pthread_barrier_t ;
# 245 "/usr/include/x86_64-linux-gnu/bits/pthreadtypes.h" <System_Header>
typedef union
{
char __size [ 4 ] ;
int __align ;
} pthread_barrierattr_t ;
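/*
 * Note (hedged): the pthread_* unions above are the glibc/NPTL ABI layouts
 * for x86-64; portable code treats them as opaque, and the __size arrays
 * only reserve storage (40 bytes for pthread_mutex_t, 56 for
 * pthread_rwlock_t and pthread_attr_t).
 */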
# 271 "/usr/include/x86_64-linux-gnu/sys/types.h" <System_Header>
# 315 "/usr/include/stdlib.h" <System_Header>
# 319 "/usr/include/stdlib.h" <System_Header>
extern long int random ( void ) ;
# 323 "/usr/include/stdlib.h" <System_Header>
extern void srandom ( unsigned int __seed ) ;
# 329 "/usr/include/stdlib.h" <System_Header>
extern char * initstate ( unsigned int __seed , char * __statebuf ,
size_t __statelen ) ;
# 334 "/usr/include/stdlib.h" <System_Header>
extern char * setstate ( char * __statebuf ) ;
# 341 "/usr/include/stdlib.h" <System_Header>
# 343 "/usr/include/stdlib.h" <System_Header>
struct random_data
{
int32_t * fptr ;
int32_t * rptr ;
int32_t * state ;
int rand_type ;
int rand_deg ;
int rand_sep ;
int32_t * end_ptr ;
} ;
# 354 "/usr/include/stdlib.h" <System_Header>
extern int random_r ( struct random_data * __restrict __buf ,
int32_t * __restrict __result ) ;
# 357 "/usr/include/stdlib.h" <System_Header>
extern int srandom_r ( unsigned int __seed , struct random_data * __buf )
;
# 360 "/usr/include/stdlib.h" <System_Header>
extern int initstate_r ( unsigned int __seed , char * __restrict __statebuf ,
size_t __statelen ,
struct random_data * __restrict __buf )
;
# 365 "/usr/include/stdlib.h" <System_Header>
extern int setstate_r ( char * __restrict __statebuf ,
struct random_data * __restrict __buf )
;
# 373 "/usr/include/stdlib.h" <System_Header>
extern int rand ( void ) ;
extern void srand ( unsigned int __seed ) ;
# 380 "/usr/include/stdlib.h" <System_Header>
extern int rand_r ( unsigned int * __seed ) ;
# 386 "/usr/include/stdlib.h" <System_Header>
# 388 "/usr/include/stdlib.h" <System_Header>
extern double drand48 ( void ) ;
extern double erand48 ( unsigned short int __xsubi [ 3 ] ) ;
# 392 "/usr/include/stdlib.h" <System_Header>
extern long int lrand48 ( void ) ;
extern long int nrand48 ( unsigned short int __xsubi [ 3 ] )
;
# 397 "/usr/include/stdlib.h" <System_Header>
extern long int mrand48 ( void ) ;
extern long int jrand48 ( unsigned short int __xsubi [ 3 ] )
;
# 402 "/usr/include/stdlib.h" <System_Header>
extern void srand48 ( long int __seedval ) ;
extern unsigned short int * seed48 ( unsigned short int __seed16v [ 3 ] )
;
extern void lcong48 ( unsigned short int __param [ 7 ] ) ;
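/*
 * Hedged sketch: the drand48 family declared above is a 48-bit linear
 * congruential generator; a typical use is to seed once and draw uniform
 * doubles in [0.0, 1.0). The example_* names are illustrative.
 */
static double example_mean_of_uniforms ( long int seed , int n )
{
    double sum = 0.0 ;
    int i ;
    srand48 ( seed ) ;          /* seed the global generator once */
    for ( i = 0 ; i < n ; i ++ )
        sum += drand48 ( ) ;    /* uniform in [0.0, 1.0) */
    return sum / n ;            /* expect ~0.5 for large n > 0 */
}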
# 411 "/usr/include/stdlib.h" <System_Header>
struct drand48_data
{
unsigned short int __x [ 3 ] ;
unsigned short int __old_x [ 3 ] ;
unsigned short int __c ;
unsigned short int __init ;
unsigned long long int __a ;
} ;
# 422 "/usr/include/stdlib.h" <System_Header>
extern int drand48_r ( struct drand48_data * __restrict __buffer ,
double * __restrict __result ) ;
extern int erand48_r ( unsigned short int __xsubi [ 3 ] ,
struct drand48_data * __restrict __buffer ,
double * __restrict __result ) ;
# 429 "/usr/include/stdlib.h" <System_Header>
extern int lrand48_r ( struct drand48_data * __restrict __buffer ,
long int * __restrict __result )
;
extern int nrand48_r ( unsigned short int __xsubi [ 3 ] ,
struct drand48_data * __restrict __buffer ,
long int * __restrict __result )
;
# 438 "/usr/include/stdlib.h" <System_Header>
extern int mrand48_r ( struct drand48_data * __restrict __buffer ,
long int * __restrict __result )
;
extern int jrand48_r ( unsigned short int __xsubi [ 3 ] ,
struct drand48_data * __restrict __buffer ,
long int * __restrict __result )
;
# 447 "/usr/include/stdlib.h" <System_Header>
extern int srand48_r ( long int __seedval , struct drand48_data * __buffer )
;
# 451 "/usr/include/stdlib.h" <System_Header>
extern int seed48_r ( unsigned short int __seed16v [ 3 ] ,
struct drand48_data * __buffer ) ;
# 454 "/usr/include/stdlib.h" <System_Header>
extern int lcong48_r ( unsigned short int __param [ 7 ] ,
struct drand48_data * __buffer )
;
# 465 "/usr/include/stdlib.h" <System_Header>
extern void * malloc ( size_t __size ) ;
extern void * calloc ( size_t __nmemb , size_t __size )
;
# 476 "/usr/include/stdlib.h" <System_Header>
# 479 "/usr/include/stdlib.h" <System_Header>
extern void * realloc ( void * __ptr , size_t __size )
;
extern void free ( void * __ptr ) ;
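/*
 * Hedged sketch: a common growth pattern over the allocator declared
 * above. Assigning realloc()'s result to a temporary avoids leaking the
 * old block on failure; realloc(NULL, n) behaves like malloc(n), so the
 * first call may pass buf == NULL. Illustrative names only.
 */
static double * example_grow ( double * buf , size_t * capacity )
{
    size_t new_cap = * capacity ? * capacity * 2 : 16 ;
    double * tmp = realloc ( buf , new_cap * sizeof * tmp ) ;
    if ( tmp == 0 )
        return 0 ;   /* buf is still valid and still owned by the caller */
    * capacity = new_cap ;
    return tmp ;
}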
# 487 "/usr/include/stdlib.h" <System_Header>
extern void cfree ( void * __ptr ) ;
# 492 "/usr/include/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 22 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 23 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 26 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 29 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 33 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
# 39 "/opt/pgi/linux86-64/17.10/include/alloca.h" <System_Header>
extern void * __alloca ( size_t __size ) ;
extern void * alloca ( size_t __size ) ;
extern void * __builtin_alloca ( size_t __size ) ;
# 493 "/usr/include/stdlib.h" <System_Header>
# 497 "/usr/include/stdlib.h" <System_Header>
extern void * valloc ( size_t __size ) ;
# 502 "/usr/include/stdlib.h" <System_Header>
extern int posix_memalign ( void * * __memptr , size_t __alignment , size_t __size )
;
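/*
 * Hedged sketch: posix_memalign() declared above is the portable way to
 * get cache-line- or SIMD-aligned storage, which matters for stencil
 * kernels such as MG. The alignment must be a power of two multiple of
 * sizeof(void *); the 64-byte choice below is an illustrative assumption.
 */
static void * example_aligned_alloc ( size_t bytes )
{
    void * p = 0 ;
    if ( posix_memalign ( & p , 64 , bytes ) != 0 )
        return 0 ;   /* returns an errno value on failure, does not set errno */
    return p ;       /* release with free(p) */
}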
# 514 "/usr/include/stdlib.h" <System_Header>
extern void abort ( void ) __attribute__ ( ( __noreturn__ ) ) ;
# 518 "/usr/include/stdlib.h" <System_Header>
extern int atexit ( void ( * __func ) ( void ) ) ;
# 534 "/usr/include/stdlib.h" <System_Header>
extern int on_exit ( void ( * __func ) ( int __status , void * __arg ) , void * __arg )
;
# 542 "/usr/include/stdlib.h" <System_Header>
extern void exit ( int __status ) __attribute__ ( ( __noreturn__ ) ) ;
# 556 "/usr/include/stdlib.h" <System_Header>
extern void _Exit ( int __status ) __attribute__ ( ( __noreturn__ ) ) ;
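/*
 * Hedged sketch: atexit() declared above registers handlers that run on
 * exit() and on return from main(), but not on _Exit(), which skips all
 * cleanup. Handlers run in reverse order of registration. Illustrative
 * names only.
 */
static void example_report ( void )
{
    /* flush logs, free global buffers, print timing summaries, ... */
}

static void example_install_cleanup ( void )
{
    atexit ( example_report ) ;
}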
# 563 "/usr/include/stdlib.h" <System_Header>
extern char * getenv ( const char * __name ) ;
# 575 "/usr/include/stdlib.h" <System_Header>
# 577 "/usr/include/stdlib.h" <System_Header>
extern int putenv ( char * __string ) ;
# 583 "/usr/include/stdlib.h" <System_Header>
extern int setenv ( const char * __name , const char * __value , int __replace )
;
# 587 "/usr/include/stdlib.h" <System_Header>
extern int unsetenv ( const char * __name ) ;
# 594 "/usr/include/stdlib.h" <System_Header>
extern int clearenv ( void ) ;
# 605 "/usr/include/stdlib.h" <System_Header>
extern char * mktemp ( char * __template ) ;
# 617 "/usr/include/stdlib.h" <System_Header>
# 619 "/usr/include/stdlib.h" <System_Header>
extern int mkstemp ( char * __template ) ;
# 639 "/usr/include/stdlib.h" <System_Header>
# 641 "/usr/include/stdlib.h" <System_Header>
extern int mkstemps ( char * __template , int __suffixlen ) ;
# 661 "/usr/include/stdlib.h" <System_Header>
extern char * mkdtemp ( char * __template ) ;
# 715 "/usr/include/stdlib.h" <System_Header>
extern int system ( const char * __command ) ;
# 732 "/usr/include/stdlib.h" <System_Header>
extern char * realpath ( const char * __restrict __name ,
char * __restrict __resolved ) ;
# 738 "/usr/include/stdlib.h" <System_Header>
# 741 "/usr/include/stdlib.h" <System_Header>
typedef int ( * __compar_fn_t ) ( const void * , const void * ) ;
# 753 "/usr/include/stdlib.h" <System_Header>
extern void * bsearch ( const void * __key , const void * __base ,
size_t __nmemb , size_t __size , __compar_fn_t __compar )
;
# 763 "/usr/include/stdlib.h" <System_Header>
extern void qsort ( void * __base , size_t __nmemb , size_t __size ,
__compar_fn_t __compar ) ;
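/*
 * Hedged sketch: qsort() and bsearch() declared above share the
 * __compar_fn_t convention; the comparator receives pointers to elements,
 * not the elements themselves. Illustrative names only.
 */
static int example_cmp_int ( const void * a , const void * b )
{
    int x = * ( const int * ) a ;
    int y = * ( const int * ) b ;
    return ( x > y ) - ( x < y ) ;  /* avoids overflow of x - y */
}

static int * example_sort_and_find ( int * v , size_t n , int key )
{
    qsort ( v , n , sizeof v [ 0 ] , example_cmp_int ) ;
    return bsearch ( & key , v , n , sizeof v [ 0 ] , example_cmp_int ) ;
}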
# 773 "/usr/include/stdlib.h" <System_Header>
extern int abs ( int __x ) __attribute__ ( ( __const__ ) ) ;
extern long int labs ( long int __x ) __attribute__ ( ( __const__ ) ) ;
# 779 "/usr/include/stdlib.h" <System_Header>
extern long long int llabs ( long long int __x )
__attribute__ ( ( __const__ ) ) ;
# 786 "/usr/include/stdlib.h" <System_Header>
extern div_t div ( int __numer , int __denom )
__attribute__ ( ( __const__ ) ) ;
extern ldiv_t ldiv ( long int __numer , long int __denom )
__attribute__ ( ( __const__ ) ) ;
# 796 "/usr/include/stdlib.h" <System_Header>
extern lldiv_t lldiv ( long long int __numer ,
long long int __denom )
__attribute__ ( ( __const__ ) ) ;
# 806 "/usr/include/stdlib.h" <System_Header>
# 810 "/usr/include/stdlib.h" <System_Header>
extern char * ecvt ( double __value , int __ndigit , int * __restrict __decpt ,
int * __restrict __sign ) ;
# 816 "/usr/include/stdlib.h" <System_Header>
extern char * fcvt ( double __value , int __ndigit , int * __restrict __decpt ,
int * __restrict __sign ) ;
# 822 "/usr/include/stdlib.h" <System_Header>
extern char * gcvt ( double __value , int __ndigit , char * __buf )
;
# 828 "/usr/include/stdlib.h" <System_Header>
extern char * qecvt ( long double __value , int __ndigit ,
int * __restrict __decpt , int * __restrict __sign )
;
extern char * qfcvt ( long double __value , int __ndigit ,
int * __restrict __decpt , int * __restrict __sign )
;
extern char * qgcvt ( long double __value , int __ndigit , char * __buf )
;
# 840 "/usr/include/stdlib.h" <System_Header>
extern int ecvt_r ( double __value , int __ndigit , int * __restrict __decpt ,
int * __restrict __sign , char * __restrict __buf ,
size_t __len ) ;
extern int fcvt_r ( double __value , int __ndigit , int * __restrict __decpt ,
int * __restrict __sign , char * __restrict __buf ,
size_t __len ) ;
# 848 "/usr/include/stdlib.h" <System_Header>
extern int qecvt_r ( long double __value , int __ndigit ,
int * __restrict __decpt , int * __restrict __sign ,
char * __restrict __buf , size_t __len )
;
extern int qfcvt_r ( long double __value , int __ndigit ,
int * __restrict __decpt , int * __restrict __sign ,
char * __restrict __buf , size_t __len )
;
# 861 "/usr/include/stdlib.h" <System_Header>
extern int mblen ( const char * __s , size_t __n ) ;
# 864 "/usr/include/stdlib.h" <System_Header>
extern int mbtowc ( wchar_t * __restrict __pwc ,
const char * __restrict __s , size_t __n ) ;
# 868 "/usr/include/stdlib.h" <System_Header>
extern int wctomb ( char * __s , wchar_t __wchar ) ;
# 872 "/usr/include/stdlib.h" <System_Header>
extern size_t mbstowcs ( wchar_t * __restrict __pwcs ,
const char * __restrict __s , size_t __n ) ;
extern size_t wcstombs ( char * __restrict __s ,
const wchar_t * __restrict __pwcs , size_t __n )
;
# 886 "/usr/include/stdlib.h" <System_Header>
extern int rpmatch ( const char * __response ) ;
# 897 "/usr/include/stdlib.h" <System_Header>
extern int getsubopt ( char * * __restrict __optionp ,
char * const * __restrict __tokens ,
char * * __restrict __valuep )
;
# 911 "/usr/include/stdlib.h" <System_Header>
# 949 "/usr/include/stdlib.h" <System_Header>
extern int getloadavg ( double __loadavg [ ] , int __nelem )
;
# 954 "/usr/include/stdlib.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/stdlib-float.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/stdlib-float.h" <System_Header>
# 955 "/usr/include/stdlib.h" <System_Header>
# 956 "/usr/include/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 442 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 456 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 459 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
int __builtin_abs ( int ) ;
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 90 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
# 90 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
extern void * omp_target_alloc ( size_t , int ) ;
extern void omp_target_free ( void * , int ) ;
extern int omp_target_memcpy ( void * , void * , size_t , size_t , size_t , int , int ) ;
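/*
 * Hedged sketch: the three declarations above match the OpenMP 4.5
 * device-memory routines; omp_target_memcpy takes a byte count plus
 * destination/source offsets and device numbers. The example_* names are
 * illustrative assumptions.
 */
static void * example_copy_to_device ( void * host_buf , size_t bytes )
{
    int dev = omp_get_default_device ( ) ;
    void * dev_buf = omp_target_alloc ( bytes , dev ) ;
    if ( dev_buf != 0 )
        /* dst, src, length, dst_offset, src_offset, dst_dev, src_dev */
        omp_target_memcpy ( dev_buf , host_buf , bytes , 0 , 0 ,
                            dev , omp_get_initial_device ( ) ) ;
    return dev_buf ; /* release with omp_target_free(dev_buf, dev) */
}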
# 94 "/opt/pgi/linux86-64/17.10/include/omp.h" <System_Header>
typedef int _Atomic_word ;
extern void _mp_atomic_add ( int * , int ) ;
extern void _mp_exchange_and_add ( int * , int ) ;
# 5 "main.c"
# 5 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
# 27 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
typedef enum {
acc_device_none = 0 ,
acc_device_default = 1 ,
acc_device_host = 2 ,
acc_device_not_host = 3 ,
acc_device_nvidia = 4 ,
acc_device_radeon = 5 ,
acc_device_xeonphi = 6 ,
acc_device_pgi_opencl = 7 ,
acc_device_nvidia_opencl = 8 ,
acc_device_opencl = 9
} acc_device_t ;
# 45 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
void acc_set_default_async ( int async ) ;
int acc_get_default_async ( void ) ;
extern int acc_get_num_devices ( acc_device_t devtype ) ;
extern acc_device_t acc_get_device ( void ) ;
extern void acc_set_device_num ( int devnum , acc_device_t devtype ) ;
extern int acc_get_device_num ( acc_device_t devtype ) ;
extern void acc_init ( acc_device_t devtype ) ;
extern void acc_shutdown ( acc_device_t devtype ) ;
extern void acc_set_deviceid ( int devid ) ;
extern int acc_get_deviceid ( int devnum , acc_device_t devtype ) ;
extern int acc_async_test ( long async ) ;
extern int acc_async_test_all ( void ) ;
extern void acc_async_wait ( long async ) ;
extern void acc_async_wait_all ( void ) ;
extern void acc_wait ( long async ) ;
extern void acc_wait_async ( long arg , long async ) ;
extern void acc_wait_all ( void ) ;
extern void acc_wait_all_async ( long async ) ;
extern int acc_on_device ( acc_device_t devtype ) ;
extern void acc_free ( void * ) ;
# 66 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern void * acc_memcpy ( void * targetptr , void * srcptr , unsigned long bytes ) ;
extern void * acc_memcpy_async ( void * targetptr , void * srcptr , unsigned long bytes , long async ) ;
extern void * acc_copyin ( void * hostptr , unsigned long bytes ) ;
extern void * acc_copyin_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_pcopyin ( void * hostptr , unsigned long bytes ) ;
extern void * acc_pcopyin_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_present_or_copyin ( void * hostptr , unsigned long bytes ) ;
extern void * acc_present_or_copyin_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_create ( void * hostptr , unsigned long bytes ) ;
extern void * acc_create_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_pcreate ( void * hostptr , unsigned long bytes ) ;
extern void * acc_pcreate_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void * acc_present_or_create ( void * hostptr , unsigned long bytes ) ;
extern void * acc_present_or_create_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_copyout ( void * hostptr , unsigned long bytes ) ;
extern void acc_copyout_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_delete ( void * hostptr , unsigned long bytes ) ;
extern void acc_delete_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_update_device ( void * hostptr , unsigned long bytes ) ;
extern void acc_update_device_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_update_self ( void * hostptr , unsigned long bytes ) ;
extern void acc_update_self_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_update_host ( void * hostptr , unsigned long bytes ) ;
extern void acc_update_host_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_memcpy_to_device ( void * devptr , void * hostptr , unsigned long bytes ) ;
extern void acc_memcpy_to_device_async ( void * devptr , void * hostptr , unsigned long bytes , long async ) ;
extern void acc_memcpy_from_device ( void * hostptr , void * devptr , unsigned long bytes ) ;
extern void acc_memcpy_from_device_async ( void * hostptr , void * devptr , unsigned long bytes , long async ) ;
extern void * acc_memcpy_device ( void * targetdevptr , void * srcdevptr , unsigned long bytes ) ;
extern void * acc_memcpy_device_async ( void * targetdevptr , void * srcdevptr , unsigned long bytes , long async ) ;
extern void acc_attach ( void * * hostptrptr ) ;
extern void acc_attach_async ( void * * hostptrptr , long async ) ;
extern void acc_detach ( void * * hostptrptr ) ;
extern void acc_detach_async ( void * * hostptrptr , long async ) ;
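/* A minimal sketch of the usual lifetime of the data-API calls declared
   above , assuming a device has already been selected ; the buffer name
   and size are illustrative only :

       double * a = malloc ( n * sizeof ( double ) ) ;
       acc_copyin ( a , n * sizeof ( double ) ) ;      // allocate on device , copy host -> device
       // ... compute regions read / write the device copy ...
       acc_update_self ( a , n * sizeof ( double ) ) ; // copy device -> host
       acc_delete ( a , n * sizeof ( double ) ) ;      // free the device copy
*/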
# 101 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern void acc_set_device_type ( acc_device_t devtype ) ;
extern acc_device_t acc_get_device_type ( void ) ;
extern void * acc_malloc ( unsigned long ) ;
extern void * acc_deviceptr ( void * hostptr ) ;
extern void * acc_hostptr ( void * devptr ) ;
extern void acc_map_data ( void * hostptr , void * devptr , unsigned long bytes ) ;
extern void acc_unmap_data ( void * hostptr ) ;
extern int acc_is_present ( void * hostptr , unsigned long bytes ) ;
extern int acc_present_count ( void * hostptr ) ;
extern void acc_updatein ( void * hostptr , unsigned long bytes ) ;
extern void acc_updatein_async ( void * hostptr , unsigned long bytes , long async ) ;
extern void acc_updateout ( void * hostptr , unsigned long bytes ) ;
extern void acc_updateout_async ( void * hostptr , unsigned long bytes , long async ) ;
# 115 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern void * acc_get_current_cuda_context ( void ) ;
extern int acc_get_current_cuda_device ( void ) ;
extern void * acc_get_cuda_stream ( long ) ;
extern void acc_set_cuda_stream ( long , void * ) ;
extern void * acc_cuda_get_context ( int ) ;
extern int acc_cuda_get_device ( int ) ;
# 122 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern void * acc_get_current_opencl_context ( void ) ;
extern void * acc_get_current_opencl_device ( void ) ;
extern void * acc_get_opencl_queue ( long ) ;
# 126 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicaddi ( void * address , int val ) ;
extern unsigned int atomicaddu ( void * address , unsigned int val ) ;
extern unsigned long long atomicaddul ( void * address , unsigned long long val ) ;
extern float atomicaddf ( void * address , float val ) ;
extern double atomicaddd ( void * address , double val ) ;
# 133 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicsubi ( void * address , int val ) ;
extern unsigned int atomicsubu ( void * address , unsigned int val ) ;
extern unsigned long long atomicsubul ( void * address , unsigned long long val ) ;
extern float atomicsubf ( void * address , float val ) ;
extern double atomicsubd ( void * address , double val ) ;
# 139 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicmaxi ( void * address , int val ) ;
extern unsigned int atomicmaxu ( void * address , unsigned int val ) ;
extern unsigned long long atomicmaxul ( void * address , unsigned long long val ) ;
extern float atomicmaxf ( void * address , float val ) ;
extern double atomicmaxd ( void * address , double val ) ;
# 145 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicmini ( void * address , int val ) ;
extern unsigned int atomicminu ( void * address , unsigned int val ) ;
extern unsigned long long atomicminul ( void * address , unsigned long long val ) ;
extern float atomicminf ( void * address , float val ) ;
extern double atomicmind ( void * address , double val ) ;
# 151 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicandi ( void * address , int val ) ;
extern unsigned int atomicandu ( void * address , unsigned int val ) ;
extern unsigned long long atomicandul ( void * address , unsigned long long val ) ;
# 155 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicori ( void * address , int val ) ;
extern unsigned int atomicoru ( void * address , unsigned int val ) ;
extern unsigned long long atomicorul ( void * address , unsigned long long val ) ;
# 159 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicxori ( void * address , int val ) ;
extern unsigned int atomicxoru ( void * address , unsigned int val ) ;
extern unsigned long long atomicxorul ( void * address , unsigned long long val ) ;
# 163 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomicexchi ( void * address , int val ) ;
extern unsigned int atomicexchu ( void * address , unsigned int val ) ;
extern unsigned long long atomicexchul ( void * address , unsigned long long val ) ;
extern float atomicexchf ( void * address , float val ) ;
extern double atomicexchd ( void * address , double val ) ;
# 169 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern unsigned int atomicincu ( void * address , unsigned int val ) ;
# 171 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern unsigned int atomicdecu ( void * address , unsigned int val ) ;
# 173 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int atomiccasi ( void * address , int val , int val2 ) ;
extern unsigned int atomiccasu ( void * address , unsigned int val , unsigned int val2 ) ;
extern unsigned long long atomiccasul ( void * address , unsigned long long val , unsigned long long val2 ) ;
extern float atomiccasf ( void * address , float val , float val2 ) ;
extern double atomiccasd ( void * address , double val , double val2 ) ;
# 179 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
extern int __pgi_gangidx ( void ) ;
extern int __pgi_workeridx ( void ) ;
extern int __pgi_vectoridx ( void ) ;
extern int __pgi_blockidx ( int ) ;
extern int __pgi_threadidx ( int ) ;
# 6 "main.c"
# 6 "main.c"
# 1 "/usr/include/stdio.h" <System_Header>
# 17 "/usr/include/stdio.h" <System_Header>
# 21 "/usr/include/stdio.h" <System_Header>
# 27 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 28 "/usr/include/stdio.h" <System_Header>
# 33 "/usr/include/stdio.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 34 "/usr/include/stdio.h" <System_Header>
# 35 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 36 "/usr/include/stdio.h" <System_Header>
# 43 "/usr/include/stdio.h" <System_Header>
struct _IO_FILE ;
# 47 "/usr/include/stdio.h" <System_Header>
typedef struct _IO_FILE FILE ;
# 63 "/usr/include/stdio.h" <System_Header>
typedef struct _IO_FILE __FILE ;
# 74 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/libio.h" <System_Header>
# 26 "/usr/include/libio.h" <System_Header>
# 31 "/usr/include/libio.h" <System_Header>
# 1 "/usr/include/_G_config.h" <System_Header>
# 2 "/usr/include/_G_config.h" <System_Header>
# 7 "/usr/include/_G_config.h" <System_Header>
# 9 "/usr/include/_G_config.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 10 "/usr/include/_G_config.h" <System_Header>
# 15 "/usr/include/_G_config.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/usr/include/_G_config.h" <System_Header>
# 20 "/usr/include/_G_config.h" <System_Header>
# 1 "/usr/include/wchar.h" <System_Header>
# 16 "/usr/include/wchar.h" <System_Header>
# 21 "/usr/include/wchar.h" <System_Header>
# 51 "/usr/include/wchar.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 357 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
typedef unsigned int wint_t ;
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 52 "/usr/include/wchar.h" <System_Header>
# 54 "/usr/include/wchar.h" <System_Header>
# 64 "/usr/include/wchar.h" <System_Header>
# 73 "/usr/include/wchar.h" <System_Header>
# 81 "/usr/include/wchar.h" <System_Header>
typedef struct
{
int __count ;
union
{
# 88 "/usr/include/wchar.h" <System_Header>
unsigned int __wch ;
# 92 "/usr/include/wchar.h" <System_Header>
char __wchb [ 4 ] ;
} __value ;
} __mbstate_t ;
# 100 "/usr/include/wchar.h" <System_Header>
# 901 "/usr/include/wchar.h" <System_Header>
# 21 "/usr/include/_G_config.h" <System_Header>
# 21 "/usr/include/_G_config.h" <System_Header>
typedef struct
{
__off_t __pos ;
__mbstate_t __state ;
} _G_fpos_t ;
typedef struct
{
__off64_t __pos ;
__mbstate_t __state ;
} _G_fpos64_t ;
# 45 "/usr/include/_G_config.h" <System_Header>
# 53 "/usr/include/_G_config.h" <System_Header>
# 32 "/usr/include/libio.h" <System_Header>
# 32 "/usr/include/libio.h" <System_Header>
# 47 "/usr/include/libio.h" <System_Header>
# 49 "/usr/include/libio.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 24 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
# 34 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
typedef struct __pgi_tag {
unsigned int gp_offset ;
unsigned int fp_offset ;
char * overflow_arg_area ;
char * reg_save_area ;
} __pgi_va_list [ 1 ] ;
# 49 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
typedef __pgi_va_list va_list ;
# 60 "/opt/pgi/linux86-64/17.10/include/va_list.h" <System_Header>
typedef __pgi_va_list __gnuc_va_list ;
# 25 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
extern void * __builtin_va_arg ( ) ;
extern int __builtin_va_start ( ) ;
# 60 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 50 "/usr/include/libio.h" <System_Header>
# 90 "/usr/include/libio.h" <System_Header>
# 124 "/usr/include/libio.h" <System_Header>
# 144 "/usr/include/libio.h" <System_Header>
struct _IO_jump_t ; struct _IO_FILE ;
# 146 "/usr/include/libio.h" <System_Header>
# 150 "/usr/include/libio.h" <System_Header>
typedef void _IO_lock_t ;
# 154 "/usr/include/libio.h" <System_Header>
# 156 "/usr/include/libio.h" <System_Header>
struct _IO_marker {
struct _IO_marker * _next ;
struct _IO_FILE * _sbuf ;
int _pos ;
# 173 "/usr/include/libio.h" <System_Header>
} ;
# 175 "/usr/include/libio.h" <System_Header>
enum __codecvt_result
{
__codecvt_ok ,
__codecvt_partial ,
__codecvt_error ,
__codecvt_noconv
} ;
# 241 "/usr/include/libio.h" <System_Header>
struct _IO_FILE {
int _flags ;
# 245 "/usr/include/libio.h" <System_Header>
char * _IO_read_ptr ;
char * _IO_read_end ;
char * _IO_read_base ;
char * _IO_write_base ;
char * _IO_write_ptr ;
char * _IO_write_end ;
char * _IO_buf_base ;
char * _IO_buf_end ;
char * _IO_save_base ;
char * _IO_backup_base ;
char * _IO_save_end ;
# 260 "/usr/include/libio.h" <System_Header>
struct _IO_marker * _markers ;
# 262 "/usr/include/libio.h" <System_Header>
struct _IO_FILE * _chain ;
# 264 "/usr/include/libio.h" <System_Header>
int _fileno ;
# 268 "/usr/include/libio.h" <System_Header>
int _flags2 ;
# 270 "/usr/include/libio.h" <System_Header>
__off_t _old_offset ;
# 273 "/usr/include/libio.h" <System_Header>
unsigned short _cur_column ;
signed char _vtable_offset ;
char _shortbuf [ 1 ] ;
# 278 "/usr/include/libio.h" <System_Header>
# 280 "/usr/include/libio.h" <System_Header>
_IO_lock_t * _lock ;
# 289 "/usr/include/libio.h" <System_Header>
__off64_t _offset ;
# 297 "/usr/include/libio.h" <System_Header>
void * __pad1 ;
void * __pad2 ;
void * __pad3 ;
void * __pad4 ;
# 302 "/usr/include/libio.h" <System_Header>
size_t __pad5 ;
int _mode ;
char _unused2 [ 15 * sizeof ( int ) - 4 * sizeof ( void * ) - sizeof ( size_t ) ] ;
# 307 "/usr/include/libio.h" <System_Header>
} ;
# 310 "/usr/include/libio.h" <System_Header>
typedef struct _IO_FILE _IO_FILE ;
# 313 "/usr/include/libio.h" <System_Header>
struct _IO_FILE_plus ;
# 315 "/usr/include/libio.h" <System_Header>
extern struct _IO_FILE_plus _IO_2_1_stdin_ ;
extern struct _IO_FILE_plus _IO_2_1_stdout_ ;
extern struct _IO_FILE_plus _IO_2_1_stderr_ ;
# 329 "/usr/include/libio.h" <System_Header>
# 332 "/usr/include/libio.h" <System_Header>
typedef __ssize_t __io_read_fn ( void * __cookie , char * __buf , size_t __nbytes ) ;
# 340 "/usr/include/libio.h" <System_Header>
typedef __ssize_t __io_write_fn ( void * __cookie , const char * __buf ,
size_t __n ) ;
# 349 "/usr/include/libio.h" <System_Header>
typedef int __io_seek_fn ( void * __cookie , __off64_t * __pos , int __w ) ;
# 352 "/usr/include/libio.h" <System_Header>
typedef int __io_close_fn ( void * __cookie ) ;
# 385 "/usr/include/libio.h" <System_Header>
extern int __underflow ( _IO_FILE * ) ;
extern int __uflow ( _IO_FILE * ) ;
extern int __overflow ( _IO_FILE * , int ) ;
# 429 "/usr/include/libio.h" <System_Header>
extern int _IO_getc ( _IO_FILE * __fp ) ;
extern int _IO_putc ( int __c , _IO_FILE * __fp ) ;
extern int _IO_feof ( _IO_FILE * __fp ) ;
extern int _IO_ferror ( _IO_FILE * __fp ) ;
# 434 "/usr/include/libio.h" <System_Header>
extern int _IO_peekc_locked ( _IO_FILE * __fp ) ;
# 436 "/usr/include/libio.h" <System_Header>
# 440 "/usr/include/libio.h" <System_Header>
extern void _IO_flockfile ( _IO_FILE * ) ;
extern void _IO_funlockfile ( _IO_FILE * ) ;
extern int _IO_ftrylockfile ( _IO_FILE * ) ;
# 459 "/usr/include/libio.h" <System_Header>
extern int _IO_vfscanf ( _IO_FILE * __restrict , const char * __restrict ,
__gnuc_va_list , int * __restrict ) ;
extern int _IO_vfprintf ( _IO_FILE * __restrict , const char * __restrict ,
__gnuc_va_list ) ;
extern __ssize_t _IO_padn ( _IO_FILE * , int , __ssize_t ) ;
extern size_t _IO_sgetn ( _IO_FILE * , void * , size_t ) ;
# 466 "/usr/include/libio.h" <System_Header>
extern __off64_t _IO_seekoff ( _IO_FILE * , __off64_t , int , int ) ;
extern __off64_t _IO_seekpos ( _IO_FILE * , __off64_t , int ) ;
# 469 "/usr/include/libio.h" <System_Header>
extern void _IO_free_backup_area ( _IO_FILE * ) ;
# 75 "/usr/include/stdio.h" <System_Header>
# 83 "/usr/include/stdio.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/stdarg.h" <System_Header>
# 84 "/usr/include/stdio.h" <System_Header>
# 107 "/usr/include/stdio.h" <System_Header>
# 110 "/usr/include/stdio.h" <System_Header>
typedef _G_fpos_t fpos_t ;
# 119 "/usr/include/stdio.h" <System_Header>
# 125 "/usr/include/stdio.h" <System_Header>
# 132 "/usr/include/stdio.h" <System_Header>
# 139 "/usr/include/stdio.h" <System_Header>
# 150 "/usr/include/stdio.h" <System_Header>
# 163 "/usr/include/stdio.h" <System_Header>
# 164 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/stdio_lim.h" <System_Header>
# 165 "/usr/include/stdio.h" <System_Header>
# 167 "/usr/include/stdio.h" <System_Header>
extern struct _IO_FILE * stdin ;
extern struct _IO_FILE * stdout ;
extern struct _IO_FILE * stderr ;
# 177 "/usr/include/stdio.h" <System_Header>
extern int remove ( const char * __filename ) ;
extern int rename ( const char * __old , const char * __new ) ;
# 184 "/usr/include/stdio.h" <System_Header>
extern int renameat ( int __oldfd , const char * __old , int __newfd ,
const char * __new ) ;
# 193 "/usr/include/stdio.h" <System_Header>
# 195 "/usr/include/stdio.h" <System_Header>
extern FILE * tmpfile ( void ) ;
# 208 "/usr/include/stdio.h" <System_Header>
extern char * tmpnam ( char * __s ) ;
# 214 "/usr/include/stdio.h" <System_Header>
extern char * tmpnam_r ( char * __s ) ;
# 226 "/usr/include/stdio.h" <System_Header>
extern char * tempnam ( const char * __dir , const char * __pfx )
;
# 236 "/usr/include/stdio.h" <System_Header>
extern int fclose ( FILE * __stream ) ;
# 241 "/usr/include/stdio.h" <System_Header>
extern int fflush ( FILE * __stream ) ;
# 251 "/usr/include/stdio.h" <System_Header>
extern int fflush_unlocked ( FILE * __stream ) ;
# 271 "/usr/include/stdio.h" <System_Header>
extern FILE * fopen ( const char * __restrict __filename ,
const char * __restrict __modes ) ;
# 277 "/usr/include/stdio.h" <System_Header>
extern FILE * freopen ( const char * __restrict __filename ,
const char * __restrict __modes ,
FILE * __restrict __stream ) ;
# 305 "/usr/include/stdio.h" <System_Header>
extern FILE * fdopen ( int __fd , const char * __modes ) ;
# 318 "/usr/include/stdio.h" <System_Header>
extern FILE * fmemopen ( void * __s , size_t __len , const char * __modes )
;
# 324 "/usr/include/stdio.h" <System_Header>
extern FILE * open_memstream ( char * * __bufloc , size_t * __sizeloc ) ;
# 331 "/usr/include/stdio.h" <System_Header>
extern void setbuf ( FILE * __restrict __stream , char * __restrict __buf ) ;
# 335 "/usr/include/stdio.h" <System_Header>
extern int setvbuf ( FILE * __restrict __stream , char * __restrict __buf ,
int __modes , size_t __n ) ;
# 342 "/usr/include/stdio.h" <System_Header>
extern void setbuffer ( FILE * __restrict __stream , char * __restrict __buf ,
size_t __size ) ;
# 346 "/usr/include/stdio.h" <System_Header>
extern void setlinebuf ( FILE * __stream ) ;
# 355 "/usr/include/stdio.h" <System_Header>
extern int fprintf ( FILE * __restrict __stream ,
const char * __restrict __format , ... ) ;
# 361 "/usr/include/stdio.h" <System_Header>
extern int printf ( const char * __restrict __format , ... ) ;
extern int sprintf ( char * __restrict __s ,
const char * __restrict __format , ... ) ;
# 370 "/usr/include/stdio.h" <System_Header>
extern int vfprintf ( FILE * __restrict __s , const char * __restrict __format ,
__gnuc_va_list __arg ) ;
# 376 "/usr/include/stdio.h" <System_Header>
extern int vprintf ( const char * __restrict __format , __gnuc_va_list __arg ) ;
extern int vsprintf ( char * __restrict __s , const char * __restrict __format ,
__gnuc_va_list __arg ) ;
# 385 "/usr/include/stdio.h" <System_Header>
extern int snprintf ( char * __restrict __s , size_t __maxlen ,
const char * __restrict __format , ... )
__attribute__ ( ( __format__ ( __printf__ , 3 , 4 ) ) ) ;
# 390 "/usr/include/stdio.h" <System_Header>
extern int vsnprintf ( char * __restrict __s , size_t __maxlen ,
const char * __restrict __format , __gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __printf__ , 3 , 0 ) ) ) ;
# 411 "/usr/include/stdio.h" <System_Header>
extern int vdprintf ( int __fd , const char * __restrict __fmt ,
__gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __printf__ , 2 , 0 ) ) ) ;
extern int dprintf ( int __fd , const char * __restrict __fmt , ... )
__attribute__ ( ( __format__ ( __printf__ , 2 , 3 ) ) ) ;
# 424 "/usr/include/stdio.h" <System_Header>
extern int fscanf ( FILE * __restrict __stream ,
const char * __restrict __format , ... ) ;
# 430 "/usr/include/stdio.h" <System_Header>
extern int scanf ( const char * __restrict __format , ... ) ;
extern int sscanf ( const char * __restrict __s ,
const char * __restrict __format , ... ) ;
# 452 "/usr/include/stdio.h" <System_Header>
extern int __isoc99_fscanf ( FILE * __restrict __stream ,
const char * __restrict __format , ... ) ;
extern int __isoc99_scanf ( const char * __restrict __format , ... ) ;
extern int __isoc99_sscanf ( const char * __restrict __s ,
const char * __restrict __format , ... ) ;
# 470 "/usr/include/stdio.h" <System_Header>
extern int vfscanf ( FILE * __restrict __s , const char * __restrict __format ,
__gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __scanf__ , 2 , 0 ) ) ) ;
# 478 "/usr/include/stdio.h" <System_Header>
extern int vscanf ( const char * __restrict __format , __gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __scanf__ , 1 , 0 ) ) ) ;
# 482 "/usr/include/stdio.h" <System_Header>
extern int vsscanf ( const char * __restrict __s ,
const char * __restrict __format , __gnuc_va_list __arg )
__attribute__ ( ( __format__ ( __scanf__ , 2 , 0 ) ) ) ;
# 508 "/usr/include/stdio.h" <System_Header>
extern int __isoc99_vfscanf ( FILE * __restrict __s ,
const char * __restrict __format ,
__gnuc_va_list __arg ) ;
extern int __isoc99_vscanf ( const char * __restrict __format ,
__gnuc_va_list __arg ) ;
extern int __isoc99_vsscanf ( const char * __restrict __s ,
const char * __restrict __format ,
__gnuc_va_list __arg ) ;
# 530 "/usr/include/stdio.h" <System_Header>
extern int fgetc ( FILE * __stream ) ;
extern int getc ( FILE * __stream ) ;
# 537 "/usr/include/stdio.h" <System_Header>
extern int getchar ( void ) ;
# 542 "/usr/include/stdio.h" <System_Header>
# 549 "/usr/include/stdio.h" <System_Header>
extern int getc_unlocked ( FILE * __stream ) ;
extern int getchar_unlocked ( void ) ;
# 560 "/usr/include/stdio.h" <System_Header>
extern int fgetc_unlocked ( FILE * __stream ) ;
# 572 "/usr/include/stdio.h" <System_Header>
extern int fputc ( int __c , FILE * __stream ) ;
extern int putc ( int __c , FILE * __stream ) ;
# 579 "/usr/include/stdio.h" <System_Header>
extern int putchar ( int __c ) ;
# 584 "/usr/include/stdio.h" <System_Header>
# 593 "/usr/include/stdio.h" <System_Header>
extern int fputc_unlocked ( int __c , FILE * __stream ) ;
# 601 "/usr/include/stdio.h" <System_Header>
extern int putc_unlocked ( int __c , FILE * __stream ) ;
extern int putchar_unlocked ( int __c ) ;
# 609 "/usr/include/stdio.h" <System_Header>
extern int getw ( FILE * __stream ) ;
# 612 "/usr/include/stdio.h" <System_Header>
extern int putw ( int __w , FILE * __stream ) ;
# 621 "/usr/include/stdio.h" <System_Header>
extern char * fgets ( char * __restrict __s , int __n , FILE * __restrict __stream )
;
# 637 "/usr/include/stdio.h" <System_Header>
extern char * gets ( char * __s ) ;
# 664 "/usr/include/stdio.h" <System_Header>
extern __ssize_t __getdelim ( char * * __restrict __lineptr ,
size_t * __restrict __n , int __delimiter ,
FILE * __restrict __stream ) ;
extern __ssize_t getdelim ( char * * __restrict __lineptr ,
size_t * __restrict __n , int __delimiter ,
FILE * __restrict __stream ) ;
# 677 "/usr/include/stdio.h" <System_Header>
extern __ssize_t getline ( char * * __restrict __lineptr ,
size_t * __restrict __n ,
FILE * __restrict __stream ) ;
# 688 "/usr/include/stdio.h" <System_Header>
extern int fputs ( const char * __restrict __s , FILE * __restrict __stream ) ;
# 694 "/usr/include/stdio.h" <System_Header>
extern int puts ( const char * __s ) ;
# 701 "/usr/include/stdio.h" <System_Header>
extern int ungetc ( int __c , FILE * __stream ) ;
# 708 "/usr/include/stdio.h" <System_Header>
extern size_t fread ( void * __restrict __ptr , size_t __size ,
size_t __n , FILE * __restrict __stream ) ;
# 714 "/usr/include/stdio.h" <System_Header>
extern size_t fwrite ( const void * __restrict __ptr , size_t __size ,
size_t __n , FILE * __restrict __s ) ;
# 736 "/usr/include/stdio.h" <System_Header>
extern size_t fread_unlocked ( void * __restrict __ptr , size_t __size ,
size_t __n , FILE * __restrict __stream ) ;
extern size_t fwrite_unlocked ( const void * __restrict __ptr , size_t __size ,
size_t __n , FILE * __restrict __stream ) ;
# 748 "/usr/include/stdio.h" <System_Header>
extern int fseek ( FILE * __stream , long int __off , int __whence ) ;
# 753 "/usr/include/stdio.h" <System_Header>
extern long int ftell ( FILE * __stream ) ;
# 758 "/usr/include/stdio.h" <System_Header>
extern void rewind ( FILE * __stream ) ;
# 765 "/usr/include/stdio.h" <System_Header>
# 772 "/usr/include/stdio.h" <System_Header>
extern int fseeko ( FILE * __stream , __off_t __off , int __whence ) ;
# 777 "/usr/include/stdio.h" <System_Header>
extern __off_t ftello ( FILE * __stream ) ;
# 797 "/usr/include/stdio.h" <System_Header>
extern int fgetpos ( FILE * __restrict __stream , fpos_t * __restrict __pos ) ;
# 802 "/usr/include/stdio.h" <System_Header>
extern int fsetpos ( FILE * __stream , const fpos_t * __pos ) ;
# 825 "/usr/include/stdio.h" <System_Header>
extern void clearerr ( FILE * __stream ) ;
extern int feof ( FILE * __stream ) ;
extern int ferror ( FILE * __stream ) ;
# 834 "/usr/include/stdio.h" <System_Header>
extern void clearerr_unlocked ( FILE * __stream ) ;
extern int feof_unlocked ( FILE * __stream ) ;
extern int ferror_unlocked ( FILE * __stream ) ;
# 845 "/usr/include/stdio.h" <System_Header>
extern void perror ( const char * __s ) ;
# 852 "/usr/include/stdio.h" <System_Header>
# 853 "/usr/include/stdio.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/sys_errlist.h" <System_Header>
extern int sys_nerr ;
extern const char * const sys_errlist [ ] ;
# 854 "/usr/include/stdio.h" <System_Header>
# 857 "/usr/include/stdio.h" <System_Header>
extern int fileno ( FILE * __stream ) ;
# 862 "/usr/include/stdio.h" <System_Header>
extern int fileno_unlocked ( FILE * __stream ) ;
# 871 "/usr/include/stdio.h" <System_Header>
extern FILE * popen ( const char * __command , const char * __modes ) ;
# 877 "/usr/include/stdio.h" <System_Header>
extern int pclose ( FILE * __stream ) ;
# 883 "/usr/include/stdio.h" <System_Header>
extern char * ctermid ( char * __s ) ;
# 909 "/usr/include/stdio.h" <System_Header>
# 911 "/usr/include/stdio.h" <System_Header>
extern void flockfile ( FILE * __stream ) ;
# 915 "/usr/include/stdio.h" <System_Header>
extern int ftrylockfile ( FILE * __stream ) ;
# 918 "/usr/include/stdio.h" <System_Header>
extern void funlockfile ( FILE * __stream ) ;
# 931 "/usr/include/stdio.h" <System_Header>
# 7 "main.c"
# 7 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 8 "main.c"
# 8 "main.c"
# 1 "/usr/include/string.h" <System_Header>
# 16 "/usr/include/string.h" <System_Header>
# 20 "/usr/include/string.h" <System_Header>
# 25 "/usr/include/string.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 26 "/usr/include/string.h" <System_Header>
# 29 "/usr/include/string.h" <System_Header>
# 32 "/usr/include/string.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 33 "/usr/include/string.h" <System_Header>
# 34 "/usr/include/string.h" <System_Header>
# 41 "/usr/include/string.h" <System_Header>
extern void * memcpy ( void * __restrict __dest , const void * __restrict __src ,
size_t __n ) ;
# 45 "/usr/include/string.h" <System_Header>
extern void * memmove ( void * __dest , const void * __src , size_t __n )
;
# 52 "/usr/include/string.h" <System_Header>
# 54 "/usr/include/string.h" <System_Header>
extern void * memccpy ( void * __restrict __dest , const void * __restrict __src ,
int __c , size_t __n )
;
# 61 "/usr/include/string.h" <System_Header>
extern void * memset ( void * __s , int __c , size_t __n ) ;
# 64 "/usr/include/string.h" <System_Header>
extern int memcmp ( const void * __s1 , const void * __s2 , size_t __n )
;
# 68 "/usr/include/string.h" <System_Header>
# 92 "/usr/include/string.h" <System_Header>
extern void * memchr ( const void * __s , int __c , size_t __n )
;
# 124 "/usr/include/string.h" <System_Header>
extern char * strcpy ( char * __restrict __dest , const char * __restrict __src )
;
extern char * strncpy ( char * __restrict __dest ,
const char * __restrict __src , size_t __n )
;
# 132 "/usr/include/string.h" <System_Header>
extern char * strcat ( char * __restrict __dest , const char * __restrict __src )
;
extern char * strncat ( char * __restrict __dest , const char * __restrict __src ,
size_t __n ) ;
# 139 "/usr/include/string.h" <System_Header>
extern int strcmp ( const char * __s1 , const char * __s2 )
;
extern int strncmp ( const char * __s1 , const char * __s2 , size_t __n )
;
# 146 "/usr/include/string.h" <System_Header>
extern int strcoll ( const char * __s1 , const char * __s2 )
;
extern size_t strxfrm ( char * __restrict __dest ,
const char * __restrict __src , size_t __n )
;
# 158 "/usr/include/string.h" <System_Header>
# 159 "/usr/include/string.h" <System_Header>
# 1 "/usr/include/xlocale.h" <System_Header>
# 18 "/usr/include/xlocale.h" <System_Header>
# 26 "/usr/include/xlocale.h" <System_Header>
typedef struct __locale_struct
{
struct __locale_data * __locales [ 13 ] ;
# 32 "/usr/include/xlocale.h" <System_Header>
const unsigned short int * __ctype_b ;
const int * __ctype_tolower ;
const int * __ctype_toupper ;
# 37 "/usr/include/xlocale.h" <System_Header>
const char * __names [ 13 ] ;
} * __locale_t ;
# 41 "/usr/include/xlocale.h" <System_Header>
typedef __locale_t locale_t ;
# 160 "/usr/include/string.h" <System_Header>
# 161 "/usr/include/string.h" <System_Header>
extern int strcoll_l ( const char * __s1 , const char * __s2 , __locale_t __l )
;
extern size_t strxfrm_l ( char * __dest , const char * __src , size_t __n ,
__locale_t __l ) ;
# 170 "/usr/include/string.h" <System_Header>
extern char * strdup ( const char * __s )
;
# 177 "/usr/include/string.h" <System_Header>
# 179 "/usr/include/string.h" <System_Header>
extern char * strndup ( const char * __string , size_t __n )
;
# 207 "/usr/include/string.h" <System_Header>
# 231 "/usr/include/string.h" <System_Header>
extern char * strchr ( const char * __s , int __c )
;
# 234 "/usr/include/string.h" <System_Header>
# 258 "/usr/include/string.h" <System_Header>
extern char * strrchr ( const char * __s , int __c )
;
# 279 "/usr/include/string.h" <System_Header>
extern size_t strcspn ( const char * __s , const char * __reject )
;
# 283 "/usr/include/string.h" <System_Header>
extern size_t strspn ( const char * __s , const char * __accept )
;
# 310 "/usr/include/string.h" <System_Header>
extern char * strpbrk ( const char * __s , const char * __accept )
;
# 313 "/usr/include/string.h" <System_Header>
# 337 "/usr/include/string.h" <System_Header>
extern char * strstr ( const char * __haystack , const char * __needle )
;
# 342 "/usr/include/string.h" <System_Header>
extern char * strtok ( char * __restrict __s , const char * __restrict __delim )
;
# 348 "/usr/include/string.h" <System_Header>
extern char * __strtok_r ( char * __restrict __s ,
const char * __restrict __delim ,
char * * __restrict __save_ptr )
;
# 354 "/usr/include/string.h" <System_Header>
extern char * strtok_r ( char * __restrict __s , const char * __restrict __delim ,
char * * __restrict __save_ptr )
;
# 393 "/usr/include/string.h" <System_Header>
extern size_t strlen ( const char * __s )
;
# 400 "/usr/include/string.h" <System_Header>
extern size_t strnlen ( const char * __string , size_t __maxlen )
;
# 407 "/usr/include/string.h" <System_Header>
extern char * strerror ( int __errnum ) ;
# 417 "/usr/include/string.h" <System_Header>
# 420 "/usr/include/string.h" <System_Header>
# 426 "/usr/include/string.h" <System_Header>
extern int __xpg_strerror_r ( int __errnum , char * __buf , size_t __buflen )
;
# 439 "/usr/include/string.h" <System_Header>
extern char * strerror_l ( int __errnum , __locale_t __l ) ;
# 445 "/usr/include/string.h" <System_Header>
extern void __bzero ( void * __s , size_t __n ) ;
# 449 "/usr/include/string.h" <System_Header>
extern void bcopy ( const void * __src , void * __dest , size_t __n )
;
# 453 "/usr/include/string.h" <System_Header>
extern void bzero ( void * __s , size_t __n ) ;
# 456 "/usr/include/string.h" <System_Header>
extern int bcmp ( const void * __s1 , const void * __s2 , size_t __n )
;
# 460 "/usr/include/string.h" <System_Header>
# 484 "/usr/include/string.h" <System_Header>
extern char * index ( const char * __s , int __c )
;
# 488 "/usr/include/string.h" <System_Header>
# 512 "/usr/include/string.h" <System_Header>
extern char * rindex ( const char * __s , int __c )
;
# 517 "/usr/include/string.h" <System_Header>
extern int ffs ( int __i ) __attribute__ ( ( __const__ ) ) ;
# 521 "/usr/include/string.h" <System_Header>
# 528 "/usr/include/string.h" <System_Header>
extern int strcasecmp ( const char * __s1 , const char * __s2 )
;
# 532 "/usr/include/string.h" <System_Header>
extern int strncasecmp ( const char * __s1 , const char * __s2 , size_t __n )
;
# 551 "/usr/include/string.h" <System_Header>
extern char * strsep ( char * * __restrict __stringp ,
const char * __restrict __delim )
;
# 558 "/usr/include/string.h" <System_Header>
extern char * strsignal ( int __sig ) ;
# 561 "/usr/include/string.h" <System_Header>
extern char * __stpcpy ( char * __restrict __dest , const char * __restrict __src )
;
extern char * stpcpy ( char * __restrict __dest , const char * __restrict __src )
;
# 568 "/usr/include/string.h" <System_Header>
extern char * __stpncpy ( char * __restrict __dest ,
const char * __restrict __src , size_t __n )
;
extern char * stpncpy ( char * __restrict __dest ,
const char * __restrict __src , size_t __n )
;
# 9 "main.c"
# 9 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 30 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 25 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 28 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 33 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 34 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header>
# 4 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header>
# 7 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 30 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 25 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 28 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 1 "/usr/include/limits.h" <System_Header>
# 16 "/usr/include/limits.h" <System_Header>
# 20 "/usr/include/limits.h" <System_Header>
# 25 "/usr/include/limits.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 26 "/usr/include/limits.h" <System_Header>
# 30 "/usr/include/limits.h" <System_Header>
# 35 "/usr/include/limits.h" <System_Header>
# 40 "/usr/include/limits.h" <System_Header>
# 44 "/usr/include/limits.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 11 "/usr/include/x86_64-linux-gnu/bits/wordsize.h" <System_Header>
# 45 "/usr/include/limits.h" <System_Header>
# 47 "/usr/include/limits.h" <System_Header>
# 50 "/usr/include/limits.h" <System_Header>
# 52 "/usr/include/limits.h" <System_Header>
# 55 "/usr/include/limits.h" <System_Header>
# 59 "/usr/include/limits.h" <System_Header>
# 62 "/usr/include/limits.h" <System_Header>
# 71 "/usr/include/limits.h" <System_Header>
# 75 "/usr/include/limits.h" <System_Header>
# 78 "/usr/include/limits.h" <System_Header>
# 82 "/usr/include/limits.h" <System_Header>
# 85 "/usr/include/limits.h" <System_Header>
# 93 "/usr/include/limits.h" <System_Header>
# 102 "/usr/include/limits.h" <System_Header>
# 106 "/usr/include/limits.h" <System_Header>
# 116 "/usr/include/limits.h" <System_Header>
# 120 "/usr/include/limits.h" <System_Header>
# 128 "/usr/include/limits.h" <System_Header>
# 142 "/usr/include/limits.h" <System_Header>
# 143 "/usr/include/limits.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 33 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 36 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 46 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 53 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 56 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 59 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 63 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 66 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 69 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 72 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 75 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 82 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 95 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 98 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 102 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 105 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 108 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 111 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 114 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 117 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 120 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 123 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 127 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 130 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 133 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 136 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 155 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 159 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 160 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 37 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 38 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 1 "/usr/include/linux/limits.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 40 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 45 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 55 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 61 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 63 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 66 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 68 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 71 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 73 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 77 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 80 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 83 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 86 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 89 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 92 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 95 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 98 "/usr/include/x86_64-linux-gnu/bits/local_lim.h" <System_Header>
# 161 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 169 "/usr/include/x86_64-linux-gnu/bits/posix1_lim.h" <System_Header>
# 144 "/usr/include/limits.h" <System_Header>
# 147 "/usr/include/limits.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 20 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 32 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 35 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 46 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 54 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 60 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 87 "/usr/include/x86_64-linux-gnu/bits/posix2_lim.h" <System_Header>
# 148 "/usr/include/limits.h" <System_Header>
# 169 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 8 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 57 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 66 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 71 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 77 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 85 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 102 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 108 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 116 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 122 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 127 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 133 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 138 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 144 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 163 "/usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h" <System_Header>
# 31 "/opt/pgi/linux86-64/17.10/include-gcc50/limits.h" <System_Header>
# 10 "main.c"
# 11 "main.c"
# 27 "main.c"
/* number of GPUs the runtime drives ; -1 means not yet initialized */
int __MACC_NUMGPUS = - 1 ;
# 29 "main.c"
int __macc_get_num_gpus ( )
{
return acc_get_num_devices ( acc_device_nvidia ) ;
}
# 34 "main.c"
/* maps a logical GPU index to the physical device number handed to OpenACC */
int * __MACC_TOPOLOGY ;
# 36 "main.c"
void __macc_set_gpu_num ( int i )
{
acc_set_device_num ( __MACC_TOPOLOGY [ i ] , acc_device_nvidia ) ;
}
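/* A minimal initialization sketch ( illustrative only , assuming
   __MACC_TOPOLOGY has been filled with one physical device number per
   logical GPU ) :

       __MACC_NUMGPUS = __macc_get_num_gpus ( ) ;
       #pragma omp parallel num_threads ( __MACC_NUMGPUS )
       __macc_set_gpu_num ( omp_get_thread_num ( ) ) ;
*/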
# 44 "main.c"
struct __MaccDataTableEntry {
void * addr ;                          /* host base address of the mapped region */
void * addr_ub ;                       /* host address of the last mapped element */
int type_size ;                        /* element size in bytes */
int entire_lb ;                        /* first mapped element index */
int entire_ub ;                        /* last mapped element index */
int dirty ;                            /* nonzero if the device copy holds updates not yet on the host */
int dirty_lb ;                         /* first dirty element index ( -1 if clean ) */
int dirty_ub ;                         /* last dirty element index ( -1 if clean ) */
int offset ;                           /* element offset of the most recent lookup ( set by __macc_data_table_find ) */
struct __MaccDataTableEntry * next ;   /* hash-chain link */
} ;
# 57 "main.c"
/* per-GPU hash table of mapped regions , keyed by ( ( long ) ptr / 16 ) % 256 */
struct __MaccDataTable {
struct __MaccDataTableEntry * entries [ 256 ] ;
} ;
# 61 "main.c"
struct __MaccDataTable * __MACC_DATA_TABLE_SET ;   /* one table per GPU , indexed by gpu_num */
# 67 "main.c"
/* per-GPU lookup cache for pointers that fall inside a mapped region
   rather than at its base : 16 lanes x 16 slots per lane */
struct __MaccDataWrapCache {
void * addr [ 16 * 16 ] ;                          /* cached query pointers */
struct __MaccDataTableEntry * entry [ 16 * 16 ] ;  /* table entry each cached pointer falls in */
int offset [ 16 * 16 ] ;                           /* element offset of the cached pointer within its entry */
int cachenum [ 16 ] ;                              /* number of valid slots in each lane */
} ;
# 74 "main.c"
struct __MaccDataWrapCache * __MACC_DATA_WRAP_CACHE_SET ;   /* one cache per GPU */
# 76 "main.c"
void __macc_data_table_insert (
int gpu_num , void * ptr , int type_size , int entire_lb , int entire_ub )
{
int index = ( ( ( long ) ptr / 16 ) % 256 ) ;   /* hash : 16-byte granules into 256 buckets */
# 81 "main.c"
struct __MaccDataTableEntry * new_entry = malloc_managed ( sizeof ( struct __MaccDataTableEntry ) ) ;
# 83 "main.c"
new_entry -> addr = ptr ;
new_entry -> addr_ub = ptr + entire_ub * type_size ;   /* void * arithmetic : GNU / PGI extension */
new_entry -> type_size = type_size ;
new_entry -> entire_lb = entire_lb ;
new_entry -> entire_ub = entire_ub ;
new_entry -> dirty = 0 ;
new_entry -> dirty_lb = - 1 ;
new_entry -> dirty_ub = - 1 ;
new_entry -> next = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] ;
# 93 "main.c"
__MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] = new_entry ;
}
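/* insertion is O ( 1 ) : the new record is pushed at the head of its hash
   chain , so the most recently mapped region is found first on lookup */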
# 96 "main.c"
struct __MaccDataTableEntry * __macc_data_table_find ( int gpu_num , void * ptr )
{
int index = ( ( ( long ) ptr / 16 ) % 256 ) ;
struct __MaccDataTableEntry * entry = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] ;
# 101 "main.c"
while ( entry != ( ( void * ) 0 ) ) {
if ( entry -> addr == ptr ) {
entry -> offset = 0 ;
return entry ;
}
# 107 "main.c"
entry = entry -> next ;
}
# 110 "main.c"
/* use a pointer into the per-GPU cache array so that the insertions
   further down persist across calls */
struct __MaccDataWrapCache * wrap_cache = & __MACC_DATA_WRAP_CACHE_SET [ gpu_num ] ;
int lane = ( ( ( long ) ptr / 16 ) % 16 ) ;
# 113 "main.c"
for ( int i = 0 ; i < wrap_cache -> cachenum [ lane ] ; i ++ ) {
if ( ptr == wrap_cache -> addr [ lane * 16 + i ] ) {
entry = wrap_cache -> entry [ lane * 16 + i ] ;
entry -> offset = wrap_cache -> offset [ lane * 16 + i ] ;
return entry ;
}
}
# 121 "main.c"
for ( int i = 0 ; i < 256 ; i ++ ) {
entry = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ i ] ;
# 124 "main.c"
while ( entry != ( ( void * ) 0 ) ) {
if ( entry -> addr <= ptr && ptr <= entry -> addr_ub ) {
int offset = ( ptr - entry -> addr ) / entry -> type_size ;
# 128 "main.c"
int cachenum = wrap_cache -> cachenum [ lane ] ;
# 130 "main.c"
/* Simple replacement policy: once the lane is full, restart at slot 0. */
if ( cachenum == 16 ) {
cachenum = 0 ;
}
# 134 "main.c"
/* Memoize this lookup under the queried pointer itself; a query equal to the
   base address is already served by the hash-bucket fast path above and
   never reaches this cache. */
wrap_cache -> addr [ lane * 16 + cachenum ] = ptr ;
wrap_cache -> entry [ lane * 16 + cachenum ] = entry ;
wrap_cache -> offset [ lane * 16 + cachenum ] = offset ;
# 138 "main.c"
wrap_cache -> cachenum [ lane ] = cachenum + 1 ;
# 140 "main.c"
entry -> offset = offset ;
return entry ;
}
# 144 "main.c"
entry = entry -> next ;
}
}
# 148 "main.c"
fprintf ( stderr , "Error on __macc_data_table_find: Not found the item %p\n" , ptr ) ;
exit ( - 1 ) ;
# 151 "main.c"
return ( ( void * ) 0 ) ;
}
# 154 "main.c"
void __macc_data_table_delete ( int gpu_num , void * ptr )
{
int index = ( ( ( long ) ptr / 16 ) % 256 ) ;
struct __MaccDataTableEntry * entry = __MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] ;
struct __MaccDataTableEntry * pre = ( ( void * ) 0 ) ;
# 160 "main.c"
memset ( __MACC_DATA_WRAP_CACHE_SET [ gpu_num ] . cachenum , 0 , 16 * sizeof ( int ) ) ;
# 162 "main.c"
if ( entry != ( ( void * ) 0 ) ) {
if ( entry -> addr == ptr ) {
__MACC_DATA_TABLE_SET [ gpu_num ] . entries [ index ] = entry -> next ;
free_managed ( entry ) ;
return ;
}
# 169 "main.c"
pre = entry ;
entry = entry -> next ;
}
# 173 "main.c"
while ( pre != ( ( void * ) 0 ) && entry != ( ( void * ) 0 ) ) {
if ( entry -> addr == ptr ) {
pre -> next = entry -> next ;
free_managed ( entry ) ;
return ;
}
# 180 "main.c"
pre = entry ;
entry = entry -> next ;
}
# 184 "main.c"
fprintf ( stderr , "Error on __macc_data_table_delete: Not found the item %p\n" , ptr ) ;
exit ( - 1 ) ;
}
# 188 "main.c"
void __macc_delete ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
acc_delete_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ;
__macc_data_table_delete ( gpu_num , ptr ) ;
acc_wait ( gpu_num ) ;
}
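/*
 * Copy-out transfers only the interval this device actually dirtied rather
 * than the whole registered range.  The update and the delete are enqueued
 * on the same async queue (indexed by gpu_num), so the acc_wait inside
 * __macc_delete orders the read-back before the deallocation.
 */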
# 195 "main.c"
void __macc_copyout ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ;
# 199 "main.c"
if ( entry -> dirty )
acc_update_self_async ( ( entry -> addr + entry -> dirty_lb * entry -> type_size ) ,
( ( entry -> dirty_ub - entry -> dirty_lb + 1 ) * entry -> type_size ) ,
gpu_num ) ;
# 204 "main.c"
__macc_delete ( gpu_num , ptr , type_size , lb , length ) ;
}
# 207 "main.c"
void __macc_copyin ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
acc_copyin_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ;
__macc_data_table_insert ( gpu_num , ptr , type_size , lb , lb + length - 1 ) ;
acc_wait ( gpu_num ) ;
}
# 214 "main.c"
void __macc_create ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
acc_create_async ( ( ptr + lb * type_size ) , length * type_size , gpu_num ) ;
__macc_data_table_insert ( gpu_num , ptr , type_size , lb , lb + length - 1 ) ;
acc_wait ( gpu_num ) ;
}
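/*
 * __macc_malloc/__macc_free replicate one managed host allocation onto every
 * device: an OpenMP parallel region with one thread per GPU lets each thread
 * issue the create/delete for its own device (the thread-to-device binding is
 * presumably established elsewhere via __macc_set_gpu_num).  The region is
 * registered with element size 1, so its bounds are tracked in bytes; note
 * that lengths are carried as int, which caps a replicated allocation at
 * INT_MAX bytes.
 */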
# 221 "main.c"
void * __macc_malloc ( unsigned long size )
{
void * ret = malloc_managed ( size ) ;
# 225 "main.c"
# 225 "main.c"
#pragma omp parallel num_threads ( __MACC_NUMGPUS )
{
__macc_create ( omp_get_thread_num ( ) , ret , 1 , 0 , size ) ;
}
# 230 "main.c"
return ret ;
}
# 233 "main.c"
void __macc_free ( void * ptr )
{
# 235 "main.c"
#pragma omp parallel num_threads ( __MACC_NUMGPUS )
{
int gpu_num = omp_get_thread_num ( ) ;
struct __MaccDataTableEntry * entry =
__macc_data_table_find ( gpu_num , ptr ) ;
__macc_delete ( gpu_num , ptr , 1 , 0 , entry -> entire_ub + 1 ) ;
}
free_managed ( ptr ) ;
}
# 245 "main.c"
void __macc_update_self ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ;
ptr = entry -> addr ;
lb += entry -> offset ;
int ub = lb + length - 1 ;
# 252 "main.c"
/* Copy back only the intersection of the requested range with this device's
   dirty range: new_lb = max(dirty_lb, lb), new_ub = min(dirty_ub, ub). */
if ( entry -> dirty && ( ! ( entry -> dirty_lb > ub || entry -> dirty_ub < lb ) ) ) {
int new_lb = ( ( ( entry -> dirty_lb ) > ( lb ) ) ? ( entry -> dirty_lb ) : ( lb ) ) ;
int new_ub = ( ( ( entry -> dirty_ub ) < ( ub ) ) ? ( entry -> dirty_ub ) : ( ub ) ) ;
acc_update_self ( ( ptr + new_lb * type_size ) , ( ( new_ub - new_lb + 1 ) * type_size ) ) ;
}
}
# 259 "main.c"
void __macc_update_device ( int gpu_num , void * ptr , int type_size , int lb , int length )
{
acc_update_device ( ( ptr + lb * type_size ) , length * type_size ) ;
}
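/*
 * The helpers below maintain, per device, the inclusive index range an array
 * is accessed with inside a compute region: start from an empty range
 * (lb = INT_MAX, ub = -1), widen it with each observed index, then test
 * whether any two devices' ranges intersect.  Presumably the generated code
 * uses a positive overlap test to fall back to the single-device execution
 * implemented by the __macc_rewrite_*_into_single functions further down.
 */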
# 264 "main.c"
void __macc_init_access_region ( int gpu_num , int * lb_set , int * ub_set )
{
lb_set [ gpu_num ] = 2147483647 ;
ub_set [ gpu_num ] = - 1 ;
}
# 270 "main.c"
void __macc_update_access_region ( int gpu_num , int * lb_set , int * ub_set , int val )
{
lb_set [ gpu_num ] = ( ( ( lb_set [ gpu_num ] ) < ( val ) ) ? ( lb_set [ gpu_num ] ) : ( val ) ) ;
ub_set [ gpu_num ] = ( ( ( ub_set [ gpu_num ] ) > ( val ) ) ? ( ub_set [ gpu_num ] ) : ( val ) ) ;
}
# 276 "main.c"
int __macc_region_is_overlapping ( int * lb_set , int * ub_set )
{
for ( int i = 0 ; i < __MACC_NUMGPUS - 1 ; i ++ )
for ( int j = i + 1 ; j < __MACC_NUMGPUS ; j ++ )
if ( ( ! ( lb_set [ i ] > ub_set [ j ] || ub_set [ i ] < lb_set [ j ] ) ) )
return 1 ;
# 283 "main.c"
return 0 ;
}
# 286 "main.c"
void __macc_calc_loop_region
( int * loop_lb_set , int * loop_ub_set ,
int entire_start , int entire_end , int step , int until_equal )
{
/* Normalize entire_end to the last iteration value actually executed
   (assuming a positive step): tmp is the largest reachable value not past
   entire_end; for a "<" loop whose bound is exactly reachable, the last
   iteration is one step earlier. */
int tmp = entire_start + step * ( ( entire_end - entire_start ) / step ) ;
entire_end = tmp - ( ( until_equal || entire_end != tmp ) ? 0 : step ) ;
# 294 "main.c"
/* Block-distribute the iteration space: width is the per-device span rounded
   down to a multiple of step, rem counts the leftover steps handed to the
   first devices, and the final "width -= step" converts the span into an
   inclusive upper-bound offset. */
int len = entire_end - entire_start + step ;
int width = ( int ) ( ( float ) len / __MACC_NUMGPUS ) ;
width -= width % step ;
int rem = ( len - width * __MACC_NUMGPUS ) / step ;
width -= step ;
# 300 "main.c"
int pos = entire_start ;
# 302 "main.c"
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) {
loop_lb_set [ i ] = pos ;
pos = ( width < 0 ) ? pos : ( ( ( pos + width + ( ( i < rem ) ? step : 0 ) ) < ( entire_end ) ) ? ( pos + width + ( ( i < rem ) ? step : 0 ) ) : ( entire_end ) ) ;
loop_ub_set [ i ] = pos ;
pos = ( ( ( pos + step ) < ( entire_end ) ) ? ( pos + step ) : ( entire_end ) ) ;
}
}
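/*
 * Example: splitting "for (i = 0; i < 100; i++)" over 3 devices
 * (entire_start = 0, entire_end = 100, step = 1, until_equal = 0) first
 * normalizes entire_end to 99, then yields the inclusive bounds
 * [0,33], [34,66], [67,99]: rem = 1, so device 0 takes the one extra
 * iteration and the others get 33 each.
 */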
# 310 "main.c"
void __macc_adjust_data_region ( void * ptr , int gpu_num , int * lb_set , int * ub_set )
{
struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ;
# 314 "main.c"
lb_set [ gpu_num ] += entry -> offset ;
ub_set [ gpu_num ] += entry -> offset ;
}
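/*
 * Fallback path: when per-device access ranges overlap, a block distribution
 * would race, so the two rewrites below collapse the region onto device 0.
 * Device 0 receives the whole iteration space and the merged data range;
 * every other device gets an empty range (lb = 1 > ub = 0).
 */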
# 318 "main.c"
void __macc_rewrite_loop_region_into_single ( int * loop_lb_set , int * loop_ub_set )
{
loop_ub_set [ 0 ] = loop_ub_set [ __MACC_NUMGPUS - 1 ] ;
# 322 "main.c"
for ( int i = 1 ; i < __MACC_NUMGPUS ; i ++ ) {
loop_lb_set [ i ] = 1 ;
loop_ub_set [ i ] = 0 ;
}
}
# 328 "main.c"
void __macc_rewrite_data_region_into_single ( int * lb_set , int * ub_set )
{
int gpu_ub = __MACC_NUMGPUS - 1 ;
lb_set [ 0 ] = ( ( ( lb_set [ 0 ] ) < ( lb_set [ gpu_ub ] ) ) ? ( lb_set [ 0 ] ) : ( lb_set [ gpu_ub ] ) ) ;
ub_set [ 0 ] = ( ( ( ub_set [ 0 ] ) > ( ub_set [ gpu_ub ] ) ) ? ( ub_set [ 0 ] ) : ( ub_set [ gpu_ub ] ) ) ;
}
# 335 "main.c"
void __macc_sync_data ( int gpu_num , void * ptr , int type_size , int lb , int ub )
{
void * update_addr = ( ptr + lb * type_size ) ;
size_t length_b = ( ( ub - lb + 1 ) * type_size ) ;
# 340 "main.c"
acc_update_self ( update_addr , length_b ) ;
# 342 "main.c"
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ )
{
if ( i != gpu_num ) {
__macc_set_gpu_num ( i ) ;
acc_update_device ( update_addr , length_b ) ;
}
}
# 352 "main.c"
__macc_set_gpu_num ( gpu_num ) ;
}
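/*
 * __macc_set_data_region implements the per-array coherence step performed
 * before a compute region.  Each device tracks one dirty interval per array:
 * if this device's dirty data may be read elsewhere, it is pushed out first
 * (wholesale via __macc_sync_data, or piecewise to just the overlapping
 * readers); afterwards the interval this device is about to write becomes the
 * new dirty range.  The use_type/def_type codes appear to come from compiler
 * analysis; judging by how they are tested here: 0 = access range unknown
 * (treat the whole array as touched), 1 = not accessed, 2 = per-device bounds
 * supplied in the *_lb_set/*_ub_set arrays.
 */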
# 355 "main.c"
void __macc_set_data_region ( int gpu_num , void * ptr , int multi ,
int use_type , int * use_lb_set , int * use_ub_set ,
int def_type , int * def_lb_set , int * def_ub_set )
{
struct __MaccDataTableEntry * entry = __macc_data_table_find ( gpu_num , ptr ) ;
ptr = entry -> addr ;
# 363 "main.c"
if ( entry -> dirty && ( multi || gpu_num != 0 ) && __MACC_NUMGPUS > 1 ) {
int update_all = 0 ;
int update_all_DtoH = 0 ;
# 370 "main.c"
if ( use_type == 0 || def_type == 0 )
update_all = 1 ;
# 373 "main.c"
else if ( def_type == 2 ) {
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) {
if ( i != gpu_num &&
( ! ( entry -> dirty_lb > def_ub_set [ i ] || entry -> dirty_ub < def_lb_set [ i ] ) ) ) {
# 378 "main.c"
update_all = 1 ;
break ;
}
}
}
# 384 "main.c"
if ( ! update_all ) {
/* every_whole stays 1 only if each other device reads the whole dirty
   range; unused_[lb,ub] shrinks toward the part of the dirty range that
   no other device reads at all. */
int every_whole = 1 ;
int unused_lb = entry -> dirty_lb ;
int unused_ub = entry -> dirty_ub ;
# 389 "main.c"
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ ) {
if ( i != gpu_num ) {
if ( ( use_lb_set [ i ] <= entry -> dirty_lb && entry -> dirty_ub <= use_ub_set [ i ] ) ) {
# 393 "main.c"
/* Device i reads the whole dirty range, so a single device-to-host
   transfer of the full range is worthwhile. */
update_all_DtoH = 1 ;
}
else {
every_whole = 0 ;
# 398 "main.c"
/* Trim device i's read range off whichever end of the unused interval it touches. */
if ( use_lb_set [ i ] <= unused_lb )
unused_lb = ( ( ( unused_lb ) > ( use_ub_set [ i ] + 1 ) ) ? ( unused_lb ) : ( use_ub_set [ i ] + 1 ) ) ;
else if ( use_ub_set [ i ] >= unused_ub )
unused_ub = ( ( ( unused_ub ) < ( use_lb_set [ i ] - 1 ) ) ? ( unused_ub ) : ( use_lb_set [ i ] - 1 ) ) ;
}
}
}
# 406 "main.c"
if ( every_whole )
update_all = 1 ;
/* If no part of the dirty range goes unread, the whole of it must reach the host. */
if ( unused_ub < unused_lb )
update_all_DtoH = 1 ;
}
# 412 "main.c"
if ( update_all ) {
__macc_sync_data ( gpu_num , ptr , entry -> type_size , entry -> dirty_lb , entry -> dirty_ub ) ;
entry -> dirty = 0 ;
}
# 418 "main.c"
/* Partial path: push only the overlapping pieces of the dirty range to the
   devices that actually read them (just device 0 when the region runs single). */
else if ( entry -> dirty && use_type == 2 ) {
int thread_num = multi ? __MACC_NUMGPUS : 1 ;
# 422 "main.c"
if ( update_all_DtoH )
acc_update_self ( ( ptr + entry -> dirty_lb * entry -> type_size ) ,
( ( entry -> dirty_ub - entry -> dirty_lb + 1 ) * entry -> type_size ) ) ;
# 426 "main.c"
for ( int i = 0 ; i < thread_num ; i ++ )
{
# 431 "main.c"
if ( i != gpu_num && ( ! ( entry -> dirty_lb > use_ub_set [ i ] || entry -> dirty_ub < use_lb_set [ i ] ) ) ) {
# 435 "main.c"
int update_lb = ( ( ( entry -> dirty_lb ) > ( use_lb_set [ i ] ) ) ? ( entry -> dirty_lb ) : ( use_lb_set [ i ] ) ) ;
int update_ub = ( ( ( entry -> dirty_ub ) < ( use_ub_set [ i ] ) ) ? ( entry -> dirty_ub ) : ( use_ub_set [ i ] ) ) ;
void * update_addr = ( ptr + update_lb * entry -> type_size ) ;
size_t length_b = ( ( update_ub - update_lb + 1 ) * entry -> type_size ) ;
# 440 "main.c"
if ( ! update_all_DtoH ) {
__macc_set_gpu_num ( gpu_num ) ;
acc_update_self ( update_addr , length_b ) ;
}
__macc_set_gpu_num ( i ) ;
acc_update_device ( update_addr , length_b ) ;
}
}
__macc_set_gpu_num ( gpu_num ) ;
}
}
# 453 "main.c"
/* Record what this device is about to write as its new dirty range
   (devices other than 0 write nothing when the region runs single). */
if ( ( multi || gpu_num == 0 ) && def_type != 1 ) {
/* Unbounded write: mark the whole registered range dirty. */
if ( def_type == 0 ) {
entry -> dirty = 1 ;
entry -> dirty_lb = entry -> entire_lb ;
entry -> dirty_ub = entry -> entire_ub ;
}
# 465 "main.c"
else if ( ! ( entry -> dirty ) ) {
entry -> dirty = 1 ;
entry -> dirty_lb = def_lb_set [ gpu_num ] ;
entry -> dirty_ub = def_ub_set [ gpu_num ] ;
}
# 471 "main.c"
/* Overlapping or adjacent write: merge it into the tracked interval. */
else if (
( ! ( entry -> dirty_lb > def_ub_set [ gpu_num ] || entry -> dirty_ub < def_lb_set [ gpu_num ] ) ) ||
# 477 "main.c"
entry -> dirty_lb == def_ub_set [ gpu_num ] + 1 ||
def_lb_set [ gpu_num ] == entry -> dirty_ub + 1
) {
entry -> dirty_lb = ( ( ( entry -> dirty_lb ) < ( def_lb_set [ gpu_num ] ) ) ? ( entry -> dirty_lb ) : ( def_lb_set [ gpu_num ] ) ) ;
entry -> dirty_ub = ( ( ( entry -> dirty_ub ) > ( def_ub_set [ gpu_num ] ) ) ? ( entry -> dirty_ub ) : ( def_ub_set [ gpu_num ] ) ) ;
}
# 485 "main.c"
/* Disjoint write: only one dirty interval is tracked per entry, so flush
   the old one before adopting the new range. */
else {
__macc_sync_data ( gpu_num , ptr , entry -> type_size , entry -> dirty_lb , entry -> dirty_ub ) ;
entry -> dirty_lb = def_lb_set [ gpu_num ] ;
entry -> dirty_ub = def_ub_set [ gpu_num ] ;
}
}
}
# 493 "main.c"
void __macc_init ( )
{
char * env_macc_numgpus = getenv ( "MACC_NUMGPUS" ) ;
# 497 "main.c"
if ( env_macc_numgpus != ( ( void * ) 0 ) ) {
__MACC_NUMGPUS = atoi ( env_macc_numgpus ) ;
}
else {
__MACC_NUMGPUS = __macc_get_num_gpus ( ) ;
}
# 504 "main.c"
if ( __MACC_NUMGPUS <= 0 ) {
fputs ( "[MACC ERROR] No GPU device found." , stderr ) ;
exit ( - 1 ) ;
}
# 509 "main.c"
__MACC_TOPOLOGY = malloc_managed ( __MACC_NUMGPUS * sizeof ( int ) ) ;
char * topo = getenv ( "MACC_TOPOLOGY" ) ;
# 512 "main.c"
if ( topo != ( ( void * ) 0 ) ) {
/* Parse a comma-separated device list, e.g. MACC_TOPOLOGY=1,0,3,2 .
   Note that strtok tokenizes the environment string in place. */
int i = 0 ;
topo = strtok ( topo , "," ) ;
while ( topo != ( ( void * ) 0 ) && i < __MACC_NUMGPUS ) {
__MACC_TOPOLOGY [ i ] = atoi ( topo ) ;
topo = strtok ( ( ( void * ) 0 ) , "," ) ;
i ++ ;
}
/* Fall back to the identity mapping for any devices the list did not
   cover, so no entry is left uninitialized. */
for ( ; i < __MACC_NUMGPUS ; i ++ )
__MACC_TOPOLOGY [ i ] = i ;
} else {
for ( int i = 0 ; i < __MACC_NUMGPUS ; i ++ )
__MACC_TOPOLOGY [ i ] = i ;
}
# 525 "main.c"
# 538 "main.c"
__MACC_DATA_TABLE_SET = calloc_managed ( __MACC_NUMGPUS , sizeof ( struct __MaccDataTable ) ) ;
__MACC_DATA_WRAP_CACHE_SET = calloc_managed ( __MACC_NUMGPUS , sizeof ( struct __MaccDataWrapCache ) ) ;
# 541 "main.c"
/* Warm-up loop: allocate a 1 GiB managed buffer and run two trivial kernels
   several times so the device runtime and managed memory are initialized
   before any timed region runs. */
for ( int t = 0 ; t < 10 ; t ++ ) {
printf ( "[MACC] Wake up (%d)\n" , t ) ;
# 545 "main.c"
int n = 256 * 1024 * 1024 ;
int * tmp = malloc_managed ( n * sizeof ( int ) ) ;
tmp [ 0 ] = 0 ; /* the kernels below start at i = 1, so initialize the one element they skip */
# 548 "main.c"
# 548 "main.c"
#pragma acc data copy ( tmp [ 0 : n ] )
{
# 550 "main.c"
#pragma acc parallel loop num_gangs ( 512 ) vector_length ( 1024 ) gang vector
# 552 "main.c"
for ( int i = 1 ; i < n ; i ++ )
tmp [ i ] = i ;
# 555 "main.c"
# 555 "main.c"
#pragma acc parallel loop num_gangs ( 512 ) vector_length ( 1024 ) gang vector
# 557 "main.c"
for ( int i = 1 ; i < n ; i ++ )
tmp [ n - i ] += i ;
}
# 561 "main.c"
free_managed ( tmp ) ;
}
}
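/*
 * Configuration is taken from the environment; with illustrative values:
 *     MACC_NUMGPUS=2 MACC_TOPOLOGY=1,0 ./a.out
 * runs on two GPUs and maps logical device 0 to physical device 1 and vice
 * versa.  With neither variable set, all detected NVIDIA devices are used in
 * order.
 */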
# 578 "main.c"
# 1 "../../common/wtime.h"
# 3 "../../common/wtime.h"
# 579 "main.c"
# 579 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 580 "main.c"
# 581 "main.c"
void wtime_ ( double * ) ;
# 585 "main.c"
static double elapsed_time ( void )
{
double t ;
# 592 "main.c"
wtime_ ( & t ) ;
return ( t ) ;
}
# 597 "main.c"
static double start [ 64 ] , elapsed [ 64 ] ;
# 599 "main.c"
void timer_clear ( int n )
{
elapsed [ n ] = 0.0 ;
}
# 608 "main.c"
void timer_start ( int n )
{
start [ n ] = elapsed_time ( ) ;
}
# 617 "main.c"
void timer_stop ( int n )
{
double t , now ;
# 624 "main.c"
now = elapsed_time ( ) ;
t = now - start [ n ] ;
elapsed [ n ] += t ;
# 628 "main.c"
}
# 631 "main.c"
double timer_read ( int n )
{
return ( elapsed [ n ] ) ;
}
# 639 "main.c"
# 1 "/usr/include/stdio.h" <System_Header>
# 17 "/usr/include/stdio.h" <System_Header>
# 21 "/usr/include/stdio.h" <System_Header>
# 640 "main.c"
# 640 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 29 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 35 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double tgamma ( double ) ;
float tgammaf ( float ) ;
# 38 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double round ( double ) ;
float roundf ( float ) ;
long int lround ( double ) ;
long int lroundf ( float ) ;
long long int llround ( double ) ;
long long int llroundf ( float ) ;
# 59 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 1 "/usr/include/math.h" <System_Header>
# 17 "/usr/include/math.h" <System_Header>
# 21 "/usr/include/math.h" <System_Header>
# 26 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 27 "/usr/include/math.h" <System_Header>
# 30 "/usr/include/math.h" <System_Header>
# 31 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 25 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header>
# 31 "/usr/include/x86_64-linux-gnu/bits/libm-simd-decl-stubs.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/bits/math-vector.h" <System_Header>
# 32 "/usr/include/math.h" <System_Header>
# 34 "/usr/include/math.h" <System_Header>
# 35 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 18 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 1 "/usr/include/endian.h" <System_Header>
# 16 "/usr/include/endian.h" <System_Header>
# 40 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
# 41 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
typedef union { unsigned char __c [ 8 ] ; double __d ; } __huge_val_t ;
# 50 "/usr/include/x86_64-linux-gnu/bits/huge_val.h" <System_Header>
static __huge_val_t __huge_val = { { 0 , 0 , 0 , 0 , 0 , 0 , 0xf0 , 0x7f } } ;
# 36 "/usr/include/math.h" <System_Header>
# 37 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
# 18 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
typedef union { unsigned char __c [ 4 ] ; float __f ; } __huge_valf_t ;
# 48 "/usr/include/x86_64-linux-gnu/bits/huge_valf.h" <System_Header>
static __huge_valf_t __huge_valf = { { 0 , 0 , 0x80 , 0x7f } } ;
# 38 "/usr/include/math.h" <System_Header>
# 38 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header>
# 18 "/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header>
# 37 "/usr/include/x86_64-linux-gnu/bits/huge_vall.h" <System_Header>
static union { unsigned char __c [ 12 ] ; long double __ld ; } __huge_vall = { { 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0x80 , 0xff , 0x7f , 0 , 0 } } ;
# 39 "/usr/include/math.h" <System_Header>
# 40 "/usr/include/math.h" <System_Header>
# 41 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/bits/inf.h" <System_Header>
# 42 "/usr/include/math.h" <System_Header>
# 43 "/usr/include/math.h" <System_Header>
# 44 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 39 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 1 "/usr/include/endian.h" <System_Header>
# 16 "/usr/include/endian.h" <System_Header>
# 40 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
# 48 "/usr/include/x86_64-linux-gnu/bits/nan.h" <System_Header>
static union { unsigned char __c [ 4 ] ; float __d ; } __qnan_union
__attribute__ ( ( __unused__ ) ) = { { 0 , 0 , 0xc0 , 0x7f } } ;
# 45 "/usr/include/math.h" <System_Header>
# 47 "/usr/include/math.h" <System_Header>
# 48 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
typedef float float_t ;
typedef double double_t ;
# 41 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
# 46 "/usr/include/x86_64-linux-gnu/bits/mathdef.h" <System_Header>
# 49 "/usr/include/math.h" <System_Header>
# 53 "/usr/include/math.h" <System_Header>
# 83 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double acos ( double __x ) ; extern double __acos ( double __x ) ;
extern double asin ( double __x ) ; extern double __asin ( double __x ) ;
extern double atan ( double __x ) ; extern double __atan ( double __x ) ;
extern double atan2 ( double __y , double __x ) ; extern double __atan2 ( double __y , double __x ) ;
# 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double cos ( double __x ) ; extern double __cos ( double __x ) ;
extern double sin ( double __x ) ; extern double __sin ( double __x ) ;
extern double tan ( double __x ) ; extern double __tan ( double __x ) ;
# 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double cosh ( double __x ) ; extern double __cosh ( double __x ) ;
extern double sinh ( double __x ) ; extern double __sinh ( double __x ) ;
extern double tanh ( double __x ) ; extern double __tanh ( double __x ) ;
# 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double acosh ( double __x ) ; extern double __acosh ( double __x ) ;
extern double asinh ( double __x ) ; extern double __asinh ( double __x ) ;
extern double atanh ( double __x ) ; extern double __atanh ( double __x ) ;
# 96 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double exp ( double __x ) ; extern double __exp ( double __x ) ;
# 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double frexp ( double __x , int * __exponent ) ; extern double __frexp ( double __x , int * __exponent ) ;
# 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double ldexp ( double __x , int __exponent ) ; extern double __ldexp ( double __x , int __exponent ) ;
# 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double log ( double __x ) ; extern double __log ( double __x ) ;
# 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double log10 ( double __x ) ; extern double __log10 ( double __x ) ;
# 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double modf ( double __x , double * __iptr ) ; extern double __modf ( double __x , double * __iptr ) ;
# 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double expm1 ( double __x ) ; extern double __expm1 ( double __x ) ;
# 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double log1p ( double __x ) ; extern double __log1p ( double __x ) ;
# 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double logb ( double __x ) ; extern double __logb ( double __x ) ;
# 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double exp2 ( double __x ) ; extern double __exp2 ( double __x ) ;
# 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double log2 ( double __x ) ; extern double __log2 ( double __x ) ;
# 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double pow ( double __x , double __y ) ; extern double __pow ( double __x , double __y ) ;
# 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double sqrt ( double __x ) ; extern double __sqrt ( double __x ) ;
# 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double hypot ( double __x , double __y ) ; extern double __hypot ( double __x , double __y ) ;
# 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double cbrt ( double __x ) ; extern double __cbrt ( double __x ) ;
# 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double ceil ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __ceil ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fabs ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __fabs ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double floor ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __floor ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fmod ( double __x , double __y ) ; extern double __fmod ( double __x , double __y ) ;
# 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isinf ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 194 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __finite ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isinf ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int finite ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double drem ( double __x , double __y ) ; extern double __drem ( double __x , double __y ) ;
# 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double significand ( double __x ) ; extern double __significand ( double __x ) ;
# 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double copysign ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __copysign ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ;
# 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double nan ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern double __nan ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ;
# 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isnan ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isnan ( double __value ) __attribute__ ( ( __const__ ) ) ;
# 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double j0 ( double ) ; extern double __j0 ( double ) ;
extern double j1 ( double ) ; extern double __j1 ( double ) ;
extern double jn ( int , double ) ; extern double __jn ( int , double ) ;
extern double y0 ( double ) ; extern double __y0 ( double ) ;
extern double y1 ( double ) ; extern double __y1 ( double ) ;
extern double yn ( int , double ) ; extern double __yn ( int , double ) ;
# 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double erf ( double ) ; extern double __erf ( double ) ;
extern double erfc ( double ) ; extern double __erfc ( double ) ;
extern double lgamma ( double ) ; extern double __lgamma ( double ) ;
# 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double tgamma ( double ) ; extern double __tgamma ( double ) ;
# 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double gamma ( double ) ; extern double __gamma ( double ) ;
# 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double lgamma_r ( double , int * __signgamp ) ; extern double __lgamma_r ( double , int * __signgamp ) ;
# 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double rint ( double __x ) ; extern double __rint ( double __x ) ;
# 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double nextafter ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __nextafter ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ;
# 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double nexttoward ( double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern double __nexttoward ( double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double remainder ( double __x , double __y ) ; extern double __remainder ( double __x , double __y ) ;
# 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double scalbn ( double __x , int __n ) ; extern double __scalbn ( double __x , int __n ) ;
# 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int ilogb ( double __x ) ; extern int __ilogb ( double __x ) ;
# 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double scalbln ( double __x , long int __n ) ; extern double __scalbln ( double __x , long int __n ) ;
# 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double nearbyint ( double __x ) ; extern double __nearbyint ( double __x ) ;
# 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double round ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __round ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double trunc ( double __x ) __attribute__ ( ( __const__ ) ) ; extern double __trunc ( double __x ) __attribute__ ( ( __const__ ) ) ;
# 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double remquo ( double __x , double __y , int * __quo ) ; extern double __remquo ( double __x , double __y , int * __quo ) ;
# 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lrint ( double __x ) ; extern long int __lrint ( double __x ) ;
# 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llrint ( double __x ) ; extern long long int __llrint ( double __x ) ;
# 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lround ( double __x ) ; extern long int __lround ( double __x ) ;
# 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llround ( double __x ) ; extern long long int __llround ( double __x ) ;
# 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fdim ( double __x , double __y ) ; extern double __fdim ( double __x , double __y ) ;
# 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fmax ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __fmax ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ;
# 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fmin ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ; extern double __fmin ( double __x , double __y ) __attribute__ ( ( __const__ ) ) ;
# 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __fpclassify ( double __value )
__attribute__ ( ( __const__ ) ) ;
# 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __signbit ( double __value )
__attribute__ ( ( __const__ ) ) ;
# 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double fma ( double __x , double __y , double __z ) ; extern double __fma ( double __x , double __y , double __z ) ;
# 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern double scalb ( double __x , double __n ) ; extern double __scalb ( double __x , double __n ) ;
# 84 "/usr/include/math.h" <System_Header>
# 94 "/usr/include/math.h" <System_Header>
# 104 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float acosf ( float __x ) ; extern float __acosf ( float __x ) ;
extern float asinf ( float __x ) ; extern float __asinf ( float __x ) ;
extern float atanf ( float __x ) ; extern float __atanf ( float __x ) ;
extern float atan2f ( float __y , float __x ) ; extern float __atan2f ( float __y , float __x ) ;
# 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float cosf ( float __x ) ; extern float __cosf ( float __x ) ;
extern float sinf ( float __x ) ; extern float __sinf ( float __x ) ;
extern float tanf ( float __x ) ; extern float __tanf ( float __x ) ;
# 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float coshf ( float __x ) ; extern float __coshf ( float __x ) ;
extern float sinhf ( float __x ) ; extern float __sinhf ( float __x ) ;
extern float tanhf ( float __x ) ; extern float __tanhf ( float __x ) ;
# 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float acoshf ( float __x ) ; extern float __acoshf ( float __x ) ;
extern float asinhf ( float __x ) ; extern float __asinhf ( float __x ) ;
extern float atanhf ( float __x ) ; extern float __atanhf ( float __x ) ;
# 96 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float expf ( float __x ) ; extern float __expf ( float __x ) ;
# 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float frexpf ( float __x , int * __exponent ) ; extern float __frexpf ( float __x , int * __exponent ) ;
# 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float ldexpf ( float __x , int __exponent ) ; extern float __ldexpf ( float __x , int __exponent ) ;
# 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float logf ( float __x ) ; extern float __logf ( float __x ) ;
# 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float log10f ( float __x ) ; extern float __log10f ( float __x ) ;
# 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float modff ( float __x , float * __iptr ) ; extern float __modff ( float __x , float * __iptr ) ;
# 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float expm1f ( float __x ) ; extern float __expm1f ( float __x ) ;
# 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float log1pf ( float __x ) ; extern float __log1pf ( float __x ) ;
# 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float logbf ( float __x ) ; extern float __logbf ( float __x ) ;
# 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float exp2f ( float __x ) ; extern float __exp2f ( float __x ) ;
# 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float log2f ( float __x ) ; extern float __log2f ( float __x ) ;
# 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float powf ( float __x , float __y ) ; extern float __powf ( float __x , float __y ) ;
# 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float sqrtf ( float __x ) ; extern float __sqrtf ( float __x ) ;
# 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float hypotf ( float __x , float __y ) ; extern float __hypotf ( float __x , float __y ) ;
# 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float cbrtf ( float __x ) ; extern float __cbrtf ( float __x ) ;
# 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float ceilf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __ceilf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fabsf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __fabsf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float floorf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __floorf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fmodf ( float __x , float __y ) ; extern float __fmodf ( float __x , float __y ) ;
# 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isinff ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 194 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __finitef ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isinff ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int finitef ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float dremf ( float __x , float __y ) ; extern float __dremf ( float __x , float __y ) ;
# 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float significandf ( float __x ) ; extern float __significandf ( float __x ) ;
# 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float copysignf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __copysignf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ;
# 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float nanf ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern float __nanf ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ;
# 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isnanf ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isnanf ( float __value ) __attribute__ ( ( __const__ ) ) ;
# 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float j0f ( float ) ; extern float __j0f ( float ) ;
extern float j1f ( float ) ; extern float __j1f ( float ) ;
extern float jnf ( int , float ) ; extern float __jnf ( int , float ) ;
extern float y0f ( float ) ; extern float __y0f ( float ) ;
extern float y1f ( float ) ; extern float __y1f ( float ) ;
extern float ynf ( int , float ) ; extern float __ynf ( int , float ) ;
# 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float erff ( float ) ; extern float __erff ( float ) ;
extern float erfcf ( float ) ; extern float __erfcf ( float ) ;
extern float lgammaf ( float ) ; extern float __lgammaf ( float ) ;
# 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float tgammaf ( float ) ; extern float __tgammaf ( float ) ;
# 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float gammaf ( float ) ; extern float __gammaf ( float ) ;
# 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float lgammaf_r ( float , int * __signgamp ) ; extern float __lgammaf_r ( float , int * __signgamp ) ;
# 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float rintf ( float __x ) ; extern float __rintf ( float __x ) ;
# 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float nextafterf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __nextafterf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ;
# 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float nexttowardf ( float __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern float __nexttowardf ( float __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float remainderf ( float __x , float __y ) ; extern float __remainderf ( float __x , float __y ) ;
# 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float scalbnf ( float __x , int __n ) ; extern float __scalbnf ( float __x , int __n ) ;
# 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int ilogbf ( float __x ) ; extern int __ilogbf ( float __x ) ;
# 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float scalblnf ( float __x , long int __n ) ; extern float __scalblnf ( float __x , long int __n ) ;
# 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float nearbyintf ( float __x ) ; extern float __nearbyintf ( float __x ) ;
# 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float roundf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __roundf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float truncf ( float __x ) __attribute__ ( ( __const__ ) ) ; extern float __truncf ( float __x ) __attribute__ ( ( __const__ ) ) ;
# 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float remquof ( float __x , float __y , int * __quo ) ; extern float __remquof ( float __x , float __y , int * __quo ) ;
# 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lrintf ( float __x ) ; extern long int __lrintf ( float __x ) ;
# 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llrintf ( float __x ) ; extern long long int __llrintf ( float __x ) ;
# 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lroundf ( float __x ) ; extern long int __lroundf ( float __x ) ;
# 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llroundf ( float __x ) ; extern long long int __llroundf ( float __x ) ;
# 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fdimf ( float __x , float __y ) ; extern float __fdimf ( float __x , float __y ) ;
# 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fmaxf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __fmaxf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ;
# 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fminf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ; extern float __fminf ( float __x , float __y ) __attribute__ ( ( __const__ ) ) ;
# 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __fpclassifyf ( float __value )
__attribute__ ( ( __const__ ) ) ;
# 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __signbitf ( float __value )
__attribute__ ( ( __const__ ) ) ;
# 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float fmaf ( float __x , float __y , float __z ) ; extern float __fmaf ( float __x , float __y , float __z ) ;
# 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern float scalbf ( float __x , float __n ) ; extern float __scalbf ( float __x , float __n ) ;
# 105 "/usr/include/math.h" <System_Header>
# 140 "/usr/include/math.h" <System_Header>
# 151 "/usr/include/math.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 43 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 50 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 53 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double acosl ( long double __x ) ; extern long double __acosl ( long double __x ) ;
extern long double asinl ( long double __x ) ; extern long double __asinl ( long double __x ) ;
extern long double atanl ( long double __x ) ; extern long double __atanl ( long double __x ) ;
extern long double atan2l ( long double __y , long double __x ) ; extern long double __atan2l ( long double __y , long double __x ) ;
# 62 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double cosl ( long double __x ) ; extern long double __cosl ( long double __x ) ;
extern long double sinl ( long double __x ) ; extern long double __sinl ( long double __x ) ;
extern long double tanl ( long double __x ) ; extern long double __tanl ( long double __x ) ;
# 69 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 71 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double coshl ( long double __x ) ; extern long double __coshl ( long double __x ) ;
extern long double sinhl ( long double __x ) ; extern long double __sinhl ( long double __x ) ;
extern long double tanhl ( long double __x ) ; extern long double __tanhl ( long double __x ) ;
# 87 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double acoshl ( long double __x ) ; extern long double __acoshl ( long double __x ) ;
extern long double asinhl ( long double __x ) ; extern long double __asinhl ( long double __x ) ;
extern long double atanhl ( long double __x ) ; extern long double __atanhl ( long double __x ) ;
# 96 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 99 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double expl ( long double __x ) ; extern long double __expl ( long double __x ) ;
# 102 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double frexpl ( long double __x , int * __exponent ) ; extern long double __frexpl ( long double __x , int * __exponent ) ;
# 105 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double ldexpl ( long double __x , int __exponent ) ; extern long double __ldexpl ( long double __x , int __exponent ) ;
# 108 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double logl ( long double __x ) ; extern long double __logl ( long double __x ) ;
# 111 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double log10l ( long double __x ) ; extern long double __log10l ( long double __x ) ;
# 114 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double modfl ( long double __x , long double * __iptr ) ; extern long double __modfl ( long double __x , long double * __iptr ) ;
# 127 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double expm1l ( long double __x ) ; extern long double __expm1l ( long double __x ) ;
# 130 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double log1pl ( long double __x ) ; extern long double __log1pl ( long double __x ) ;
# 133 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double logbl ( long double __x ) ; extern long double __logbl ( long double __x ) ;
# 140 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double exp2l ( long double __x ) ; extern long double __exp2l ( long double __x ) ;
# 143 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double log2l ( long double __x ) ; extern long double __log2l ( long double __x ) ;
# 149 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 152 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double powl ( long double __x , long double __y ) ; extern long double __powl ( long double __x , long double __y ) ;
# 155 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double sqrtl ( long double __x ) ; extern long double __sqrtl ( long double __x ) ;
# 161 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double hypotl ( long double __x , long double __y ) ; extern long double __hypotl ( long double __x , long double __y ) ;
# 168 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double cbrtl ( long double __x ) ; extern long double __cbrtl ( long double __x ) ;
# 174 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 177 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double ceill ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __ceill ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 180 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fabsl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __fabsl ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 183 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double floorl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __floorl ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 186 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fmodl ( long double __x , long double __y ) ; extern long double __fmodl ( long double __x , long double __y ) ;
# 191 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isinfl ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 194 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __finitel ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 203 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isinfl ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 207 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int finitel ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 210 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double dreml ( long double __x , long double __y ) ; extern long double __dreml ( long double __x , long double __y ) ;
# 214 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double significandl ( long double __x ) ; extern long double __significandl ( long double __x ) ;
# 220 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double copysignl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __copysignl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 227 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double nanl ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ; extern long double __nanl ( const char * __tagb ) __attribute__ ( ( __const__ ) ) ;
# 233 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __isnanl ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 240 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int isnanl ( long double __value ) __attribute__ ( ( __const__ ) ) ;
# 246 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double j0l ( long double ) ; extern long double __j0l ( long double ) ;
extern long double j1l ( long double ) ; extern long double __j1l ( long double ) ;
extern long double jnl ( int , long double ) ; extern long double __jnl ( int , long double ) ;
extern long double y0l ( long double ) ; extern long double __y0l ( long double ) ;
extern long double y1l ( long double ) ; extern long double __y1l ( long double ) ;
extern long double ynl ( int , long double ) ; extern long double __ynl ( int , long double ) ;
# 258 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double erfl ( long double ) ; extern long double __erfl ( long double ) ;
extern long double erfcl ( long double ) ; extern long double __erfcl ( long double ) ;
extern long double lgammal ( long double ) ; extern long double __lgammal ( long double ) ;
# 267 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double tgammal ( long double ) ; extern long double __tgammal ( long double ) ;
# 273 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double gammal ( long double ) ; extern long double __gammal ( long double ) ;
# 280 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double lgammal_r ( long double , int * __signgamp ) ; extern long double __lgammal_r ( long double , int * __signgamp ) ;
# 288 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double rintl ( long double __x ) ; extern long double __rintl ( long double __x ) ;
# 291 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double nextafterl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __nextafterl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 294 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double nexttowardl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __nexttowardl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 297 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double remainderl ( long double __x , long double __y ) ; extern long double __remainderl ( long double __x , long double __y ) ;
# 301 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double scalbnl ( long double __x , int __n ) ; extern long double __scalbnl ( long double __x , int __n ) ;
# 305 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int ilogbl ( long double __x ) ; extern int __ilogbl ( long double __x ) ;
# 310 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double scalblnl ( long double __x , long int __n ) ; extern long double __scalblnl ( long double __x , long int __n ) ;
# 314 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double nearbyintl ( long double __x ) ; extern long double __nearbyintl ( long double __x ) ;
# 318 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double roundl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __roundl ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 322 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double truncl ( long double __x ) __attribute__ ( ( __const__ ) ) ; extern long double __truncl ( long double __x ) __attribute__ ( ( __const__ ) ) ;
# 327 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double remquol ( long double __x , long double __y , int * __quo ) ; extern long double __remquol ( long double __x , long double __y , int * __quo ) ;
# 331 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
# 334 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lrintl ( long double __x ) ; extern long int __lrintl ( long double __x ) ;
# 337 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llrintl ( long double __x ) ; extern long long int __llrintl ( long double __x ) ;
# 340 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long int lroundl ( long double __x ) ; extern long int __lroundl ( long double __x ) ;
# 343 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long long int llroundl ( long double __x ) ; extern long long int __llroundl ( long double __x ) ;
# 346 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fdiml ( long double __x , long double __y ) ; extern long double __fdiml ( long double __x , long double __y ) ;
# 349 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fmaxl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __fmaxl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 352 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fminl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ; extern long double __fminl ( long double __x , long double __y ) __attribute__ ( ( __const__ ) ) ;
# 356 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __fpclassifyl ( long double __value )
__attribute__ ( ( __const__ ) ) ;
# 360 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern int __signbitl ( long double __value )
__attribute__ ( ( __const__ ) ) ;
# 365 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double fmal ( long double __x , long double __y , long double __z ) ; extern long double __fmal ( long double __x , long double __y , long double __z ) ;
# 382 "/usr/include/x86_64-linux-gnu/bits/mathcalls.h" <System_Header>
extern long double scalbl ( long double __x , long double __n ) ; extern long double __scalbl ( long double __x , long double __n ) ;
# 152 "/usr/include/math.h" <System_Header>
# 167 "/usr/include/math.h" <System_Header>
extern int signgam ;
# 172 "/usr/include/math.h" <System_Header>
# 206 "/usr/include/math.h" <System_Header>
# 208 "/usr/include/math.h" <System_Header>
enum
{
FP_NAN =
# 213 "/usr/include/math.h" <System_Header>
0 ,
FP_INFINITE =
# 216 "/usr/include/math.h" <System_Header>
1 ,
FP_ZERO =
# 219 "/usr/include/math.h" <System_Header>
2 ,
FP_SUBNORMAL =
# 222 "/usr/include/math.h" <System_Header>
3 ,
FP_NORMAL =
# 225 "/usr/include/math.h" <System_Header>
4
} ;
# 230 "/usr/include/math.h" <System_Header>
# 232 "/usr/include/math.h" <System_Header>
# 248 "/usr/include/math.h" <System_Header>
# 268 "/usr/include/math.h" <System_Header>
# 282 "/usr/include/math.h" <System_Header>
# 290 "/usr/include/math.h" <System_Header>
# 304 "/usr/include/math.h" <System_Header>
# 318 "/usr/include/math.h" <System_Header>
# 324 "/usr/include/math.h" <System_Header>
# 346 "/usr/include/math.h" <System_Header>
typedef enum
{
_IEEE_ = - 1 ,
_SVID_ ,
_XOPEN_ ,
_POSIX_ ,
_ISOC_
} _LIB_VERSION_TYPE ;
# 358 "/usr/include/math.h" <System_Header>
extern _LIB_VERSION_TYPE _LIB_VERSION ;
# 368 "/usr/include/math.h" <System_Header>
# 372 "/usr/include/math.h" <System_Header>
struct exception
# 374 "/usr/include/math.h" <System_Header>
{
int type ;
char * name ;
double arg1 ;
double arg2 ;
double retval ;
} ;
# 385 "/usr/include/math.h" <System_Header>
extern int matherr ( struct exception * __exc ) ;
# 390 "/usr/include/math.h" <System_Header>
# 398 "/usr/include/math.h" <System_Header>
# 411 "/usr/include/math.h" <System_Header>
# 430 "/usr/include/math.h" <System_Header>
# 450 "/usr/include/math.h" <System_Header>
# 470 "/usr/include/math.h" <System_Header>
# 476 "/usr/include/math.h" <System_Header>
# 482 "/usr/include/math.h" <System_Header>
# 484 "/usr/include/math.h" <System_Header>
# 492 "/usr/include/math.h" <System_Header>
# 500 "/usr/include/math.h" <System_Header>
# 508 "/usr/include/math.h" <System_Header>
# 516 "/usr/include/math.h" <System_Header>
# 524 "/usr/include/math.h" <System_Header>
# 60 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 254 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 301 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 310 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_acos ( double ) ;
# 313 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_asin ( double ) ;
# 316 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_atan2 ( double , double ) ;
# 319 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_atan ( double ) ;
# 322 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_tan ( double ) ;
# 325 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_cos ( double ) ;
# 328 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_sin ( double ) ;
# 331 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_fabs ( double ) ;
# 334 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_sqrt ( double ) ;
# 337 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_log ( double ) ;
# 340 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_log10 ( double ) ;
# 343 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_exp ( double ) ;
# 346 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_pow ( double , double ) ;
# 350 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_fmin ( double , double ) ;
# 353 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_fminf ( float , float ) ;
# 356 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
double __builtin_fmax ( double , double ) ;
# 359 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_fmaxf ( float , float ) ;
# 362 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_acosf ( float ) ;
# 365 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_asinf ( float ) ;
# 368 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_atan2f ( float , float ) ;
# 371 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_atanf ( float ) ;
# 374 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_tanf ( float ) ;
# 377 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_cosf ( float ) ;
# 380 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_sinf ( float ) ;
# 383 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_fabsf ( float ) ;
# 386 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_sqrtf ( float ) ;
# 389 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_logf ( float ) ;
# 392 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_log10f ( float ) ;
# 395 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_expf ( float ) ;
# 398 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
float __builtin_powf ( float , float ) ;
# 406 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 418 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
#pragma libm ( acosf , acoshf , asinf , asinhf , atanhf , atan2f )
#pragma libm ( cbrtf , ceilf , copysignf , cosf , coshf )
#pragma libm ( erff , erfcf , expf , exp2f , exp10f , expm1f )
#pragma libm ( fabsf , floorf , fmaf , fminf , fmaxf )
#pragma libm ( ilogbf )
#pragma libm ( ldexpf , lgammaf , llrintf , llroundf , logbf , log1pf , logf , log2f , log10f , lrintf , lroundf )
#pragma libm ( nanf , nearbyintf , nextafterf )
#pragma libm ( powf )
#pragma libm ( remainderf , remquof , rintf , roundf , rsqrtf )
#pragma libm ( scalblnf , scalbnf , sinf , sinhf , sqrtf )
#pragma libm ( tanf , tanhf , tgammaf , truncf )
#pragma libm ( abs , acos , acosh , asin , asinh , atanh , atan2 )
#pragma libm ( cbrt , ceil , copysign , cos , cosh )
#pragma libm ( erf , erfc , exp , exp2 , exp10 , expm1 )
#pragma libm ( fabs , floor , fma , fmin , fmax )
#pragma libm ( ilogb , isinf , isfinite , isnan )
#pragma libm ( ldexp , lgamma , llrint , llround , logb , log1p , log , log2 , log10 , lrint , lround )
#pragma libm ( pow )
#pragma libm ( nan , nearbyint , nextafter )
#pragma libm ( remainder , remquo , rint , round , rsqrt )
#pragma libm ( scalbln , scalbn , sin , sinh , sqrt )
#pragma libm ( tan , tanh , tgamma , trunc )
# 641 "main.c"
# 641 "main.c"
# 1 "../../common/type.h"
# 4 "../../common/type.h"
typedef enum { false , true } logical ;
typedef struct {
double real ;
double imag ;
} dcomplex ;
# 642 "main.c"
# 644 "main.c"
void print_results ( char * name , char class , int n1 , int n2 , int n3 , int niter ,
double t , double mops , char * optype , logical verified , char * npbversion ,
char * compiletime , char * cs1 , char * cs2 , char * cs3 , char * cs4 , char * cs5 ,
char * cs6 , char * cs7 )
{
char size [ 16 ] ;
int j ;
# 652 "main.c"
printf ( "\n\n %s Benchmark Completed.\n" , name ) ;
printf ( " Class = %12c\n" , class ) ;
# 655 "main.c"
# 660 "main.c"
if ( ( n2 == 0 ) && ( n3 == 0 ) ) {
if ( ( name [ 0 ] == 'E' ) && ( name [ 1 ] == 'P' ) ) {
sprintf ( size , "%15.0lf" , __builtin_pow ( 2.0 , n1 ) ) ;
j = 14 ;
if ( size [ j ] == '.' ) {
size [ j ] = ' ' ;
j -- ;
}
size [ j + 1 ] = '\0' ;
printf ( " Size = %15s\n" , size ) ;
} else {
printf ( " Size = %12d\n" , n1 ) ;
}
} else {
printf ( " Size = %4dx%4dx%4d\n" , n1 , n2 , n3 ) ;
}
# 677 "main.c"
printf ( " Iterations = %12d\n" , niter ) ;
printf ( " Time in seconds = %12.2lf\n" , t ) ;
printf ( " Mop/s total = %15.2lf\n" , mops ) ;
printf ( " Operation type = %24s\n" , optype ) ;
if ( verified )
printf ( " Verification = %12s\n" , "SUCCESSFUL" ) ;
else
printf ( " Verification = %12s\n" , "UNSUCCESSFUL" ) ;
printf ( " Version = %12s\n" , npbversion ) ;
printf ( " Compile date = %12s\n" , compiletime ) ;
printf ( "\n Compile options:\n"
" CC = %s\n" , cs1 ) ;
printf ( " CLINK = %s\n" , cs2 ) ;
printf ( " C_LIB = %s\n" , cs3 ) ;
printf ( " C_INC = %s\n" , cs4 ) ;
printf ( " CFLAGS = %s\n" , cs5 ) ;
printf ( " CLINKFLAGS = %s\n" , cs6 ) ;
printf ( " RAND = %s\n" , cs7 ) ;
# 697 "main.c"
printf ( "\n--------------------------------------\n"
" Please send all errors/feedbacks to:\n"
" Center for Manycore Programming\n"
" cmp@aces.snu.ac.kr\n"
" http://aces.snu.ac.kr\n"
"--------------------------------------\n\n" ) ;
}
# 704 "main.c"
# 1 "../../common/wtime.h"
# 3 "../../common/wtime.h"
# 705 "main.c"
# 705 "main.c"
# 1 "/usr/include/time.h" <System_Header>
# 16 "/usr/include/time.h" <System_Header>
# 20 "/usr/include/time.h" <System_Header>
# 27 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 28 "/usr/include/time.h" <System_Header>
# 34 "/usr/include/time.h" <System_Header>
# 37 "/usr/include/time.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 16 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 1 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 22 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 26 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 35 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 47 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 50 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 54 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 62 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 93 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 103 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 121 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 123 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 126 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 165 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 168 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 245 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 248 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 363 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 398 "/usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include-gcc50/stddef.h" <System_Header>
# 38 "/usr/include/time.h" <System_Header>
# 40 "/usr/include/time.h" <System_Header>
# 41 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 44 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 47 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 60 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 62 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 64 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 66 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 68 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 70 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 72 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 74 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 76 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 78 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 80 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 83 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 42 "/usr/include/time.h" <System_Header>
# 43 "/usr/include/time.h" <System_Header>
# 132 "/usr/include/time.h" <System_Header>
struct tm
{
int tm_sec ;
int tm_min ;
int tm_hour ;
int tm_mday ;
int tm_mon ;
int tm_year ;
int tm_wday ;
int tm_yday ;
int tm_isdst ;
# 146 "/usr/include/time.h" <System_Header>
long int tm_gmtoff ;
const char * tm_zone ;
# 152 "/usr/include/time.h" <System_Header>
} ;
# 160 "/usr/include/time.h" <System_Header>
struct itimerspec
{
struct timespec it_interval ;
struct timespec it_value ;
} ;
# 167 "/usr/include/time.h" <System_Header>
struct sigevent ;
# 188 "/usr/include/time.h" <System_Header>
extern clock_t clock ( void ) ;
# 191 "/usr/include/time.h" <System_Header>
extern time_t time ( time_t * __timer ) ;
# 194 "/usr/include/time.h" <System_Header>
extern double difftime ( time_t __time1 , time_t __time0 )
__attribute__ ( ( __const__ ) ) ;
# 198 "/usr/include/time.h" <System_Header>
extern time_t mktime ( struct tm * __tp ) ;
# 204 "/usr/include/time.h" <System_Header>
extern size_t strftime ( char * __restrict __s , size_t __maxsize ,
const char * __restrict __format ,
const struct tm * __restrict __tp ) ;
# 220 "/usr/include/time.h" <System_Header>
# 221 "/usr/include/time.h" <System_Header>
# 1 "/usr/include/xlocale.h" <System_Header>
# 18 "/usr/include/xlocale.h" <System_Header>
# 222 "/usr/include/time.h" <System_Header>
# 223 "/usr/include/time.h" <System_Header>
extern size_t strftime_l ( char * __restrict __s , size_t __maxsize ,
const char * __restrict __format ,
const struct tm * __restrict __tp ,
__locale_t __loc ) ;
# 238 "/usr/include/time.h" <System_Header>
extern struct tm * gmtime ( const time_t * __timer ) ;
# 242 "/usr/include/time.h" <System_Header>
extern struct tm * localtime ( const time_t * __timer ) ;
# 248 "/usr/include/time.h" <System_Header>
extern struct tm * gmtime_r ( const time_t * __restrict __timer ,
struct tm * __restrict __tp ) ;
# 253 "/usr/include/time.h" <System_Header>
extern struct tm * localtime_r ( const time_t * __restrict __timer ,
struct tm * __restrict __tp ) ;
# 260 "/usr/include/time.h" <System_Header>
extern char * asctime ( const struct tm * __tp ) ;
# 263 "/usr/include/time.h" <System_Header>
extern char * ctime ( const time_t * __timer ) ;
# 268 "/usr/include/time.h" <System_Header>
# 271 "/usr/include/time.h" <System_Header>
extern char * asctime_r ( const struct tm * __restrict __tp ,
char * __restrict __buf ) ;
# 275 "/usr/include/time.h" <System_Header>
extern char * ctime_r ( const time_t * __restrict __timer ,
char * __restrict __buf ) ;
# 281 "/usr/include/time.h" <System_Header>
extern char * __tzname [ 2 ] ;
extern int __daylight ;
extern long int __timezone ;
# 288 "/usr/include/time.h" <System_Header>
extern char * tzname [ 2 ] ;
# 292 "/usr/include/time.h" <System_Header>
extern void tzset ( void ) ;
# 297 "/usr/include/time.h" <System_Header>
extern int daylight ;
extern long int timezone ;
# 303 "/usr/include/time.h" <System_Header>
extern int stime ( const time_t * __when ) ;
# 309 "/usr/include/time.h" <System_Header>
# 316 "/usr/include/time.h" <System_Header>
# 318 "/usr/include/time.h" <System_Header>
extern time_t timegm ( struct tm * __tp ) ;
# 321 "/usr/include/time.h" <System_Header>
extern time_t timelocal ( struct tm * __tp ) ;
# 324 "/usr/include/time.h" <System_Header>
extern int dysize ( int __year ) __attribute__ ( ( __const__ ) ) ;
# 333 "/usr/include/time.h" <System_Header>
extern int nanosleep ( const struct timespec * __requested_time ,
struct timespec * __remaining ) ;
# 338 "/usr/include/time.h" <System_Header>
extern int clock_getres ( clockid_t __clock_id , struct timespec * __res ) ;
# 341 "/usr/include/time.h" <System_Header>
extern int clock_gettime ( clockid_t __clock_id , struct timespec * __tp ) ;
# 344 "/usr/include/time.h" <System_Header>
extern int clock_settime ( clockid_t __clock_id , const struct timespec * __tp )
;
# 352 "/usr/include/time.h" <System_Header>
extern int clock_nanosleep ( clockid_t __clock_id , int __flags ,
const struct timespec * __req ,
struct timespec * __rem ) ;
# 357 "/usr/include/time.h" <System_Header>
extern int clock_getcpuclockid ( pid_t __pid , clockid_t * __clock_id ) ;
# 362 "/usr/include/time.h" <System_Header>
extern int timer_create ( clockid_t __clock_id ,
struct sigevent * __restrict __evp ,
timer_t * __restrict __timerid ) ;
# 367 "/usr/include/time.h" <System_Header>
extern int timer_delete ( timer_t __timerid ) ;
# 370 "/usr/include/time.h" <System_Header>
extern int timer_settime ( timer_t __timerid , int __flags ,
const struct itimerspec * __restrict __value ,
struct itimerspec * __restrict __ovalue ) ;
# 375 "/usr/include/time.h" <System_Header>
extern int timer_gettime ( timer_t __timerid , struct itimerspec * __value )
;
# 379 "/usr/include/time.h" <System_Header>
extern int timer_getoverrun ( timer_t __timerid ) ;
# 706 "main.c"
# 707 "main.c"
# 1 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 16 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/features.h" <System_Header>
# 16 "/usr/include/features.h" <System_Header>
# 22 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 23 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/types.h" <System_Header>
# 24 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 25 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/time.h" <System_Header>
# 16 "/usr/include/time.h" <System_Header>
# 20 "/usr/include/time.h" <System_Header>
# 26 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 27 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 21 "/usr/include/x86_64-linux-gnu/bits/time.h" <System_Header>
# 28 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 29 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 1 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 17 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 19 "/usr/include/x86_64-linux-gnu/sys/select.h" <System_Header>
# 30 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 54 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
struct timezone
{
int tz_minuteswest ;
int tz_dsttime ;
} ;
# 61 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
typedef struct timezone * __restrict __timezone_ptr_t ;
# 70 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int gettimeofday ( struct timeval * __restrict __tv ,
__timezone_ptr_t __tz ) ;
# 76 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int settimeofday ( const struct timeval * __tv ,
const struct timezone * __tz )
;
# 84 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int adjtime ( const struct timeval * __delta ,
struct timeval * __olddelta ) ;
# 90 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
enum __itimer_which
{
ITIMER_REAL = 0 ,
# 96 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
ITIMER_VIRTUAL = 1 ,
# 99 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
ITIMER_PROF = 2
# 103 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
} ;
# 106 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
struct itimerval
{
struct timeval it_interval ;
struct timeval it_value ;
} ;
# 120 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
typedef int __itimer_which_t ;
# 124 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int getitimer ( __itimer_which_t __which ,
struct itimerval * __value ) ;
# 130 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int setitimer ( __itimer_which_t __which ,
const struct itimerval * __restrict __new ,
struct itimerval * __restrict __old ) ;
# 137 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int utimes ( const char * __file , const struct timeval __tvp [ 2 ] )
;
# 142 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int lutimes ( const char * __file , const struct timeval __tvp [ 2 ] )
;
# 146 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
extern int futimes ( int __fd , const struct timeval __tvp [ 2 ] ) ;
# 161 "/usr/include/x86_64-linux-gnu/sys/time.h" <System_Header>
# 708 "main.c"
# 710 "main.c"
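/* wtime_: portable wall-clock timer built on gettimeofday. Subtracting the
   seconds value captured on the first call before adding the microseconds
   keeps full double precision over long runs. */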
void wtime_ ( double * t )
{
static int sec = - 1 ;
struct timeval tv ;
gettimeofday ( & tv , ( void * ) 0 ) ;
if ( sec < 0 ) sec = tv . tv_sec ;
* t = ( tv . tv_sec - sec ) + 1.0e-6 * tv . tv_usec ;
}
# 719 "main.c"
# 748 "main.c"
# 753 "main.c"
# 779 "main.c"
# 783 "main.c"
# 1 "/usr/include/stdio.h" <System_Header>
# 17 "/usr/include/stdio.h" <System_Header>
# 21 "/usr/include/stdio.h" <System_Header>
# 784 "main.c"
# 784 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 17 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 19 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 1 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 3 "/opt/pgi/linux86-64/17.10/include/stdlib.h" <System_Header>
# 20 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 25 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * malloc_managed ( size_t ) ;
extern void * calloc_managed ( size_t , size_t ) ;
extern void free_managed ( void * ) ;
extern void cfree_managed ( void * ) ;
# 31 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
extern void * realloc_managed ( void * , size_t ) ;
extern void * valloc_managed ( size_t ) ;
extern void * pvalloc_managed ( size_t ) ;
extern void * memalign_managed ( size_t , size_t ) ;
extern int posix_memalign_managed ( void * * , size_t , size_t ) ;
extern char * tmpnam_managed ( char * ) ;
# 43 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 58 "/opt/pgi/linux86-64/17.10/include_man/stdlib.h" <System_Header>
# 785 "main.c"
# 785 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 14 "/opt/pgi/linux86-64/17.10/include/math.h" <System_Header>
# 786 "main.c"
# 787 "main.c"
# 1 "../globals.h"
# 29 "../globals.h"
# 34 "../globals.h"
# 44 "../globals.h"
# 1 "../npbparams.h"
# 6 "../npbparams.h"
# 45 "../globals.h"
# 45 "../globals.h"
# 1 "../../common/type.h"
# 46 "../globals.h"
# 47 "../globals.h"
# 50 "../globals.h"
# 53 "../globals.h"
# 56 "../globals.h"
# 60 "../globals.h"
static int nx [ ( 10 + 1 ) + 1 ] ;
static int ny [ ( 10 + 1 ) + 1 ] ;
static int nz [ ( 10 + 1 ) + 1 ] ;
# 66 "../globals.h"
static char Class ;
# 69 "../globals.h"
static int debug_vec [ 8 ] ;
# 72 "../globals.h"
static int m1 [ ( 10 + 1 ) + 1 ] ;
static int m2 [ ( 10 + 1 ) + 1 ] ;
static int m3 [ ( 10 + 1 ) + 1 ] ;
static int ir [ ( 10 + 1 ) + 1 ] ;
static int lt , lb ;
static int m1lt , m2lt , m3lt ;
# 84 "../globals.h"
# 90 "../globals.h"
static logical timeron ;
# 788 "main.c"
# 788 "main.c"
# 789 "main.c"
# 1 "../../common/timers.h"
# 4 "../../common/timers.h"
void timer_clear ( int n ) ;
void timer_start ( int n ) ;
void timer_stop ( int n ) ;
double timer_read ( int n ) ;
# 790 "main.c"
# 790 "main.c"
# 1 "../../common/print_results.h"
# 4 "../../common/print_results.h"
void print_results ( char * name , char class , int n1 , int n2 , int n3 , int niter ,
double t , double mops , char * optype , logical verified , char * npbversion ,
char * compiletime , char * cs1 , char * cs2 , char * cs3 , char * cs4 , char * cs5 ,
char * cs6 , char * cs7 ) ;
# 791 "main.c"
# 791 "main.c"
# 1 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
# 10 "/opt/pgi/linux86-64/17.10/include/openacc.h" <System_Header>
# 792 "main.c"
# 795 "main.c"
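/* Forward declarations of the multigrid kernels:
   setup   - per-level grid sizes and offsets into the flat arrays
   mg3P    - one multigrid V-cycle
   psinv   - approximate inverse (smoother), u = u + C*r
   resid   - residual, r = v - A*u
   rprj3   - restriction (projection) to the next coarser level
   interp  - trilinear prolongation to the next finer level
   norm2u3 - L2 and max norms of a grid
   comm3   - periodic boundary (ghost-layer) exchange
   zran3   - random right-hand side; power/bubble/zero3 are helpers */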
static void setup ( int * n1 , int * n2 , int * n3 ) ;
static void mg3P ( double u [ ] , double v [ ] , double r [ ] ,
double a [ 4 ] , double c [ 4 ] , int n1 , int n2 , int n3 ) ;
static void psinv ( double * or , double * ou , int n1 , int n2 , int n3 ,
double c [ 4 ] , int k ) ;
static void resid ( double * ou , double * ov , double * or , int n1 , int n2 , int n3 ,
double a [ 4 ] , int k ) ;
static void rprj3 ( double * or , int m1k , int m2k , int m3k ,
double * os , int m1j , int m2j , int m3j , int k ) ;
static void interp ( double * oz , int mm1 , int mm2 , int mm3 ,
double * ou , int n1 , int n2 , int n3 , int k ) ;
static void norm2u3 ( double * or , int n1 , int n2 , int n3 ,
double * rnm2 , double * rnmu ,
int nx , int ny , int nz ) ;
static void rep_nrm ( double * u , int n1 , int n2 , int n3 , char * title , int kk ) ;
static void comm3 ( double * ou , int n1 , int n2 , int n3 , int kk ) ;
static void zran3 ( double * oz , int n1 , int n2 , int n3 , int nx , int ny , int k ) ;
static void showall ( double * oz , int n1 , int n2 , int n3 ) ;
static double power ( double a , int n ) ;
static void bubble ( double ten [ ] [ 2 ] , int j1 [ ] [ 2 ] , int j2 [ ] [ 2 ] , int j3 [ ] [ 2 ] ,
int m , int ind ) ;
static void zero3 ( double * oz , int n1 , int n2 , int n3 ) ;
# 819 "main.c"
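/* u, v, r hold all multigrid levels back to back in one flat array. The
   size expression is the NPB storage bound for LT = 10 levels of a
   (2 + 2^10)^3 padded grid (the class D default); the *8/7 factor covers
   the geometric series of coarser levels. */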
static double u [ ( ( ( ( 1 * ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) ) + ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) + 5 * ( 2 + ( 1 << 10 ) ) + 7 * 10 + 6 ) / 7 ) * 8 ) ] ;
static double v [ ( ( ( ( 1 * ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) ) + ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) + 5 * ( 2 + ( 1 << 10 ) ) + 7 * 10 + 6 ) / 7 ) * 8 ) ] ;
static double r [ ( ( ( ( 1 * ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) ) + ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) + 5 * ( 2 + ( 1 << 10 ) ) + 7 * 10 + 6 ) / 7 ) * 8 ) ] ;
int gnr = ( ( ( ( 1 * ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) ) + ( 2 + ( 1 << 10 ) ) * ( 2 + ( 1 << 10 ) ) + 5 * ( 2 + ( 1 << 10 ) ) + 7 * 10 + 6 ) / 7 ) * 8 ) ;
# 830 "main.c"
static int is1 , is2 , is3 , ie1 , ie2 , ie3 ;
# 834 "main.c"
int main ( )
{
int k , it ;
double t , tinit , mflops ;
# 843 "main.c"
double a [ 4 ] , c [ 4 ] ;
# 845 "main.c"
double rnm2 , rnmu , old2 , oldu , epsilon ;
int n1 , n2 , n3 , nit ;
double nn , verify_value , err ;
logical verified ;
# 850 "main.c"
int i ;
char * t_names [ 10 ] ;
double tmax ;
# 854 "main.c"
for ( i = 0 ; i < 10 ; i ++ ) {
timer_clear ( i ) ;
}
acc_init ( acc_device_default ) ;
timer_start ( 0 ) ;
# 860 "main.c"
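/* The presence of a "timer.flag" file in the working directory turns on
   the per-section timers reported at the end of the run. */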
FILE * fp ;
if ( ( fp = fopen ( "timer.flag" , "r" ) ) != ( ( void * ) 0 ) ) {
timeron = true ;
t_names [ 0 ] = "init" ;
t_names [ 1 ] = "benchmk" ;
t_names [ 2 ] = "mg3P" ;
t_names [ 3 ] = "psinv" ;
t_names [ 4 ] = "resid" ;
t_names [ 6 ] = "rprj3" ;
t_names [ 7 ] = "interp" ;
t_names [ 8 ] = "norm2" ;
t_names [ 9 ] = "comm3" ;
fclose ( fp ) ;
} else {
timeron = false ;
}
# 880 "main.c"
printf ( "\n\n NAS Parallel Benchmarks (NPB3.3-ACC-C) - MG Benchmark\n\n" ) ;
# 882 "main.c"
if ( ( fp = fopen ( "mg.input" , "r" ) ) != ( ( void * ) 0 ) ) {
int result ;
printf ( " Reading from input file mg.input\n" ) ;
result = __isoc99_fscanf ( fp , "%d\n" , & lt ) ;
while ( fgetc ( fp ) != '\n' ) ;
result = __isoc99_fscanf ( fp , "%d%d%d" , & nx [ lt ] , & ny [ lt ] , & nz [ lt ] ) ;
while ( fgetc ( fp ) != '\n' ) ;
result = __isoc99_fscanf ( fp , "%d" , & nit ) ;
while ( fgetc ( fp ) != '\n' ) ;
for ( i = 0 ; i <= 7 ; i ++ ) {
result = __isoc99_fscanf ( fp , "%d" , & debug_vec [ i ] ) ;
}
fclose ( fp ) ;
} else {
printf ( " No input file. Using compiled defaults \n" ) ;
lt = 10 ;
nit = 50 ;
nx [ lt ] = 1024 ;
ny [ lt ] = 1024 ;
nz [ lt ] = 1024 ;
for ( i = 0 ; i <= 7 ; i ++ ) {
debug_vec [ i ] = 0 ;
}
}
# 908 "main.c"
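/* Map the input dimensions and iteration count onto the standard NPB
   class letters; anything unrecognized is class 'U' (unknown) and skips
   verification. */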
if ( ( nx [ lt ] != ny [ lt ] ) || ( nx [ lt ] != nz [ lt ] ) ) {
Class = 'U' ;
} else if ( nx [ lt ] == 32 && nit == 4 ) {
Class = 'S' ;
} else if ( nx [ lt ] == 128 && nit == 4 ) {
Class = 'W' ;
} else if ( nx [ lt ] == 256 && nit == 4 ) {
Class = 'A' ;
} else if ( nx [ lt ] == 256 && nit == 20 ) {
Class = 'B' ;
} else if ( nx [ lt ] == 512 && nit == 20 ) {
Class = 'C' ;
} else if ( nx [ lt ] == 1024 && nit == 50 ) {
Class = 'D' ;
} else if ( nx [ lt ] == 2048 && nit == 50 ) {
Class = 'E' ;
} else {
Class = 'U' ;
}
# 928 "main.c"
a [ 0 ] = - 8.0 / 3.0 ;
a [ 1 ] = 0.0 ;
a [ 2 ] = 1.0 / 6.0 ;
a [ 3 ] = 1.0 / 12.0 ;
if ( Class == 'A' || Class == 'S' || Class == 'W' ) {
c [ 0 ] = - 3.0 / 8.0 ;
c [ 1 ] = + 1.0 / 32.0 ;
c [ 2 ] = - 1.0 / 64.0 ;
c [ 3 ] = 0.0 ;
} else {
c [ 0 ] = - 3.0 / 17.0 ;
c [ 1 ] = + 1.0 / 33.0 ;
c [ 2 ] = - 1.0 / 61.0 ;
c [ 3 ] = 0.0 ;
}
lb = 1 ;
k = lt ;
# 967 "main.c"
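/* Keep u, v, r resident on the accelerator for the whole run; the kernels
   below operate on the device copies inside this data region. */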
#pragma acc data create ( u [ 0 : gnr ] , v [ 0 : gnr ] , r [ 0 : gnr ] )
{
setup ( & n1 , & n2 , & n3 ) ;
zero3 ( u , n1 , n2 , n3 ) ;
zran3 ( v , n1 , n2 , n3 , nx [ lt ] , ny [ lt ] , k ) ;
# 973 "main.c"
norm2u3 ( v , n1 , n2 , n3 , & rnm2 , & rnmu , nx [ lt ] , ny [ lt ] , nz [ lt ] ) ;
# 975 "main.c"
printf ( " Size: %4dx%4dx%4d (class %c)\n" , nx [ lt ] , ny [ lt ] , nz [ lt ] , Class ) ;
printf ( " Iterations: %3d\n" , nit ) ;
printf ( "\n" ) ;
# 979 "main.c"
resid ( u , v , r , n1 , n2 , n3 , a , k ) ;
norm2u3 ( r , n1 , n2 , n3 , & rnm2 , & rnmu , nx [ lt ] , ny [ lt ] , nz [ lt ] ) ;
old2 = rnm2 ;
oldu = rnmu ;
# 984 "main.c"
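/* One untimed warm-up cycle, then re-initialize u and v so the timed
   section starts from the same initial state. */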
mg3P ( u , v , r , a , c , n1 , n2 , n3 ) ;
resid ( u , v , r , n1 , n2 , n3 , a , k ) ;
setup ( & n1 , & n2 , & n3 ) ;
zero3 ( u , n1 , n2 , n3 ) ;
zran3 ( v , n1 , n2 , n3 , nx [ lt ] , ny [ lt ] , k ) ;
# 993 "main.c"
timer_stop ( 0 ) ;
tinit = timer_read ( 0 ) ;
# 996 "main.c"
printf ( " Initialization time: %15.3f seconds\n\n" , tinit ) ;
# 998 "main.c"
for ( i = 1 ; i < 10 ; i ++ ) {
timer_clear ( i ) ;
}
# 1002 "main.c"
timer_start ( 1 ) ;
# 1004 "main.c"
resid ( u , v , r , n1 , n2 , n3 , a , k ) ;
norm2u3 ( r , n1 , n2 , n3 , & rnm2 , & rnmu , nx [ lt ] , ny [ lt ] , nz [ lt ] ) ;
old2 = rnm2 ;
oldu = rnmu ;
# 1009 "main.c"
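/* Main loop: nit V-cycles, each followed by a residual update, with
   progress printed at the first, last, and every 100th iteration. */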
for ( it = 1 ; it <= nit ; it ++ ) {
if ( ( it == 1 ) || ( it == nit ) || ( ( it % 100 ) == 0 ) ) {
printf ( " iter %3d\n" , it ) ;
}
mg3P ( u , v , r , a , c , n1 , n2 , n3 ) ;
resid ( u , v , r , n1 , n2 , n3 , a , k ) ;
}
# 1017 "main.c"
norm2u3 ( r , n1 , n2 , n3 , & rnm2 , & rnmu , nx [ lt ] , ny [ lt ] , nz [ lt ] ) ;
}
timer_stop ( 1 ) ;
# 1021 "main.c"
t = timer_read ( 1 ) ;
# 1023 "main.c"
verified = false ;
verify_value = 0.0 ;
# 1026 "main.c"
printf ( "\n Benchmark completed\n" ) ;
# 1028 "main.c"
epsilon = 1.0e-8 ;
if ( Class != 'U' ) {
if ( Class == 'S' ) {
verify_value = 0.5307707005734e-04 ;
} else if ( Class == 'W' ) {
verify_value = 0.6467329375339e-05 ;
} else if ( Class == 'A' ) {
verify_value = 0.2433365309069e-05 ;
} else if ( Class == 'B' ) {
verify_value = 0.1800564401355e-05 ;
} else if ( Class == 'C' ) {
verify_value = 0.5706732285740e-06 ;
} else if ( Class == 'D' ) {
verify_value = 0.1583275060440e-09 ;
} else if ( Class == 'E' ) {
verify_value = 0.8157592357404e-10 ;
}
# 1046 "main.c"
err = __builtin_fabs ( rnm2 - verify_value ) / verify_value ;
if ( err <= epsilon ) {
verified = true ;
printf ( " VERIFICATION SUCCESSFUL\n" ) ;
printf ( " L2 Norm is %20.13E\n" , rnm2 ) ;
printf ( " Error is %20.13E\n" , err ) ;
} else {
verified = false ;
printf ( " VERIFICATION FAILED\n" ) ;
printf ( " L2 Norm is %20.13E\n" , rnm2 ) ;
printf ( " The correct L2 Norm is %20.13E\n" , verify_value ) ;
}
} else {
verified = false ;
printf ( " Problem size unknown\n" ) ;
printf ( " NO VERIFICATION PERFORMED\n" ) ;
printf ( " L2 Norm is %20.13E\n" , rnm2 ) ;
}
# 1066 "main.c"
nn = 1.0 * nx [ lt ] * ny [ lt ] * nz [ lt ] ;
# 1068 "main.c"
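/* 58.0 is the NPB MG operation count: 58 floating-point operations per
   grid point per iteration. */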
if ( t != 0.0 ) {
mflops = 58.0 * nit * nn * 1.0e-6 / t ;
} else {
mflops = 0.0 ;
}
# 1074 "main.c"
print_results ( "MG" , Class , nx [ lt ] , ny [ lt ] , nz [ lt ] ,
nit , t ,
mflops , " floating point" ,
verified , "3.3.1" , "08 Dec 2017" ,
"icc" , "icc" , "-lm" , "-I../common" , "-O3 -mcmodel=medium" , "-O3 -mcmodel=medium" , "randdp" ) ;
# 1080 "main.c"
if ( timeron ) {
tmax = timer_read ( 1 ) ;
if ( tmax == 0.0 ) tmax = 1.0 ;
# 1087 "main.c"
printf ( " SECTION Time (secs)\n" ) ;
for ( i = 1 ; i < 10 ; i ++ ) {
t = timer_read ( i ) ;
if ( i == 5 ) {
t = timer_read ( 4 ) - t ;
printf ( " --> %8s:%9.3f (%6.2f%%)\n" , "mg-resid" , t , t * 100. / tmax ) ;
} else {
printf ( " %-8s:%9.3f (%6.2f%%)\n" , t_names [ i ] , t , t * 100. / tmax ) ;
}
}
}
acc_shutdown ( acc_device_default ) ;
return 0 ;
}
# 1103 "main.c"
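/* setup: derive the grid size of every level by halving the top-level
   dimensions, record the padded extents m1/m2/m3, and compute ir[k], the
   starting offset of level k inside the flat u/v/r arrays (coarser levels
   are stored after finer ones). */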
static void setup ( int * n1 , int * n2 , int * n3 )
{
int k , j ;
# 1107 "main.c"
int ax , mi [ ( 10 + 1 ) + 1 ] [ 3 ] ;
int ng [ ( 10 + 1 ) + 1 ] [ 3 ] ;
# 1110 "main.c"
ng [ lt ] [ 0 ] = nx [ lt ] ;
ng [ lt ] [ 1 ] = ny [ lt ] ;
ng [ lt ] [ 2 ] = nz [ lt ] ;
for ( k = lt - 1 ; k >= 1 ; k -- ) {
for ( ax = 0 ; ax < 3 ; ax ++ ) {
ng [ k ] [ ax ] = ng [ k + 1 ] [ ax ] / 2 ;
}
}
for ( k = lt ; k >= 1 ; k -- ) {
nx [ k ] = ng [ k ] [ 0 ] ;
ny [ k ] = ng [ k ] [ 1 ] ;
nz [ k ] = ng [ k ] [ 2 ] ;
}
# 1124 "main.c"
for ( k = lt ; k >= 1 ; k -- ) {
for ( ax = 0 ; ax < 3 ; ax ++ ) {
mi [ k ] [ ax ] = 2 + ng [ k ] [ ax ] ;
}
# 1129 "main.c"
m1 [ k ] = mi [ k ] [ 0 ] ;
m2 [ k ] = mi [ k ] [ 1 ] ;
m3 [ k ] = mi [ k ] [ 2 ] ;
}
# 1134 "main.c"
k = lt ;
is1 = 2 + ng [ k ] [ 0 ] - ng [ lt ] [ 0 ] ;
ie1 = 1 + ng [ k ] [ 0 ] ;
* n1 = 3 + ie1 - is1 ;
is2 = 2 + ng [ k ] [ 1 ] - ng [ lt ] [ 1 ] ;
ie2 = 1 + ng [ k ] [ 1 ] ;
* n2 = 3 + ie2 - is2 ;
is3 = 2 + ng [ k ] [ 2 ] - ng [ lt ] [ 2 ] ;
ie3 = 1 + ng [ k ] [ 2 ] ;
* n3 = 3 + ie3 - is3 ;
# 1145 "main.c"
ir [ lt ] = 0 ;
for ( j = lt - 1 ; j >= 1 ; j -- ) {
ir [ j ] = ir [ j + 1 ] + 1 * m1 [ j + 1 ] * m2 [ j + 1 ] * m3 [ j + 1 ] ;
}
# 1150 "main.c"
if ( debug_vec [ 1 ] >= 1 ) {
printf ( " in setup, \n" ) ;
printf ( " k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n" ) ;
printf ( "%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n" ,
k , lt , ng [ k ] [ 0 ] , ng [ k ] [ 1 ] , ng [ k ] [ 2 ] , * n1 , * n2 , * n3 , is1 , is2 , is3 , ie1 , ie2 , ie3 ) ;
}
}
# 1159 "main.c"
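/* mg3P: one V-cycle. Restrict the residual down to the coarsest level lb,
   take a single smoothing step there as the coarse "solve", then work back
   up: interpolate the correction, recompute the residual, and smooth at
   each finer level. */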
static void mg3P ( double u [ ] , double v [ ] , double r [ ] ,
double a [ 4 ] , double c [ 4 ] , int n1 , int n2 , int n3 )
{
int j , k ;
# 1167 "main.c"
for ( k = lt ; k >= lb + 1 ; k -- ) {
j = k - 1 ;
rprj3 ( & r [ ir [ k ] ] , m1 [ k ] , m2 [ k ] , m3 [ k ] ,
& r [ ir [ j ] ] , m1 [ j ] , m2 [ j ] , m3 [ j ] , k ) ;
}
k = lb ;
zero3 ( & u [ ir [ k ] ] , m1 [ k ] , m2 [ k ] , m3 [ k ] ) ;
psinv ( & r [ ir [ k ] ] , & u [ ir [ k ] ] , m1 [ k ] , m2 [ k ] , m3 [ k ] , c , k ) ;
# 1184 "main.c"
for ( k = lb + 1 ; k <= lt - 1 ; k ++ ) {
j = k - 1 ;
# 1187 "main.c"
zero3 ( & u [ ir [ k ] ] , m1 [ k ] , m2 [ k ] , m3 [ k ] ) ;
interp ( & u [ ir [ j ] ] , m1 [ j ] , m2 [ j ] , m3 [ j ] , & u [ ir [ k ] ] , m1 [ k ] , m2 [ k ] , m3 [ k ] , k ) ;
# 1193 "main.c"
resid ( & u [ ir [ k ] ] , & r [ ir [ k ] ] , & r [ ir [ k ] ] , m1 [ k ] , m2 [ k ] , m3 [ k ] , a , k ) ;
# 1198 "main.c"
psinv ( & r [ ir [ k ] ] , & u [ ir [ k ] ] , m1 [ k ] , m2 [ k ] , m3 [ k ] , c , k ) ;
}
# 1204 "main.c"
j = lt - 1 ;
k = lt ;
interp ( & u [ ir [ j ] ] , m1 [ j ] , m2 [ j ] , m3 [ j ] , u , n1 , n2 , n3 , k ) ;
resid ( u , v , r , n1 , n2 , n3 , a , k ) ;
psinv ( r , u , n1 , n2 , n3 , c , k ) ;
}
# 1212 "main.c"
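/* psinv: apply the smoother u = u + C*r with a 27-point stencil using
   coefficients c[0..2] (c[3] is zero for every class, so that term is
   dropped). r1 and r2 are device scratch arrays of neighbor-plane sums,
   so the second pass only touches one plane of each. */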
static void psinv ( double * __restrict__ or , double * __restrict__ ou , int n1 , int n2 , int n3 , double c [ 4 ] , int k )
{
# 1228 "main.c"
int i3 , i2 , i1 ;
double c0 , c1 , c2 ;
# 1232 "main.c"
double * __restrict__ r1 , * __restrict__ r2 ;
# 1235 "main.c"
c0 = c [ 0 ] ;
c1 = c [ 1 ] ;
c2 = c [ 2 ] ;
# 1239 "main.c"
r1 = ( double * ) acc_malloc ( n3 * n2 * n1 * sizeof ( double ) ) ;
r2 = ( double * ) acc_malloc ( n3 * n2 * n1 * sizeof ( double ) ) ;
# 1248 "main.c"
if ( timeron ) timer_start ( 3 ) ;
# 1249 "main.c"
#pragma acc data deviceptr ( r1 , r2 ) present ( ou [ 0 : n3 * n2 * n1 ] ) present ( or [ 0 : n3 * n2 * n1 ] )
# 1252 "main.c"
{
# 1253 "main.c"
#pragma acc parallel loop gang num_gangs ( n3 - 2 ) num_workers ( 16 ) vector_length ( 64 )
for ( i3 = 1 ; i3 < n3 - 1 ; i3 ++ ) {
# 1255 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 < n2 - 1 ; i2 ++ ) {
# 1257 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < n1 ; i1 ++ ) {
# 1264 "main.c"
( r1 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = ( or [ ( i3 ) * n2 * n1 + ( i2 - 1 ) * n1 + ( i1 ) ] ) + ( or [ ( i3 ) * n2 * n1 + ( i2 + 1 ) * n1 + ( i1 ) ] )
+ ( or [ ( i3 - 1 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) + ( or [ ( i3 + 1 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) ;
( r2 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = ( or [ ( i3 - 1 ) * n2 * n1 + ( i2 - 1 ) * n1 + ( i1 ) ] ) + ( or [ ( i3 - 1 ) * n2 * n1 + ( i2 + 1 ) * n1 + ( i1 ) ] )
+ ( or [ ( i3 + 1 ) * n2 * n1 + ( i2 - 1 ) * n1 + ( i1 ) ] ) + ( or [ ( i3 + 1 ) * n2 * n1 + ( i2 + 1 ) * n1 + ( i1 ) ] ) ;
# 1270 "main.c"
}
}
}
# 1273 "main.c"
#pragma acc parallel loop gang num_gangs ( n3 - 2 ) num_workers ( 16 ) vector_length ( 64 )
for ( i3 = 1 ; i3 < n3 - 1 ; i3 ++ ) {
# 1275 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 < n2 - 1 ; i2 ++ ) {
# 1277 "main.c"
#pragma acc loop vector
for ( i1 = 1 ; i1 < n1 - 1 ; i1 ++ ) {
# 1285 "main.c"
( ou [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = ( ou [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] )
+ c0 * ( or [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] )
+ c1 * ( ( or [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 - 1 ) ] )
+ ( or [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 + 1 ) ] )
+ ( r1 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) )
+ c2 * ( ( r2 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] )
+ ( r1 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 - 1 ) ] )
+ ( r1 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 + 1 ) ] ) ) ;
# 1296 "main.c"
}
}
}
}
if ( timeron ) timer_stop ( 3 ) ;
acc_free ( r1 ) ;
acc_free ( r2 ) ;
# 1314 "main.c"
comm3 ( ou , n1 , n2 , n3 , k ) ;
# 1320 "main.c"
if ( debug_vec [ 0 ] >= 1 ) {
rep_nrm ( ou , n1 , n2 , n3 , " psinv" , k ) ;
}
# 1325 "main.c"
if ( debug_vec [ 3 ] >= k ) {
showall ( ou , n1 , n2 , n3 ) ;
}
}
# 1332 "main.c"
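/* resid: residual r = v - A*u with the 27-point operator a[0..3] (a[1] is
   zero, so that term is dropped). u1 and u2 are device scratch arrays of
   neighbor-plane sums, mirroring the structure of psinv. */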
static void resid ( double * ou , double * ov , double * or , int n1 , int n2 , int n3 ,
double a [ 4 ] , int k )
{
# 1351 "main.c"
int i3 , i2 , i1 ;
double a0 , a2 , a3 ;
double * __restrict__ u1 , * __restrict__ u2 ;
# 1357 "main.c"
a0 = a [ 0 ] ;
a2 = a [ 2 ] ;
a3 = a [ 3 ] ;
# 1361 "main.c"
if ( timeron ) timer_start ( 4 ) ;
u1 = ( double * ) acc_malloc ( n3 * n2 * n1 * sizeof ( double ) ) ;
u2 = ( double * ) acc_malloc ( n3 * n2 * n1 * sizeof ( double ) ) ;
# 1370 "main.c"
# 1371 "main.c"
#pragma acc data deviceptr ( u1 , u2 ) present ( ou [ 0 : n3 * n2 * n1 ] ) present ( ov [ 0 : n3 * n2 * n1 ] , or [ 0 : n3 * n2 * n1 ] )
# 1374 "main.c"
{
#pragma acc parallel num_gangs ( n3 - 2 ) num_workers ( 8 ) vector_length ( 128 )
{
# 1377 "main.c"
#pragma acc loop gang
for ( i3 = 1 ; i3 < n3 - 1 ; i3 ++ ) {
# 1379 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 < n2 - 1 ; i2 ++ ) {
# 1381 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < n1 ; i1 ++ ) {
# 1388 "main.c"
( u1 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = ( ou [ ( i3 ) * n2 * n1 + ( i2 - 1 ) * n1 + ( i1 ) ] ) + ( ou [ ( i3 ) * n2 * n1 + ( i2 + 1 ) * n1 + ( i1 ) ] )
+ ( ou [ ( i3 - 1 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) + ( ou [ ( i3 + 1 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) ;
( u2 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = ( ou [ ( i3 - 1 ) * n2 * n1 + ( i2 - 1 ) * n1 + ( i1 ) ] ) + ( ou [ ( i3 - 1 ) * n2 * n1 + ( i2 + 1 ) * n1 + ( i1 ) ] )
+ ( ou [ ( i3 + 1 ) * n2 * n1 + ( i2 - 1 ) * n1 + ( i1 ) ] ) + ( ou [ ( i3 + 1 ) * n2 * n1 + ( i2 + 1 ) * n1 + ( i1 ) ] ) ;
# 1394 "main.c"
}
}
}
}
#pragma acc parallel num_gangs ( n3 - 2 ) num_workers ( 8 ) vector_length ( 128 )
{
# 1400 "main.c"
#pragma acc loop gang
for ( i3 = 1 ; i3 < n3 - 1 ; i3 ++ ) {
# 1402 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 < n2 - 1 ; i2 ++ ) {
# 1404 "main.c"
#pragma acc loop vector
for ( i1 = 1 ; i1 < n1 - 1 ; i1 ++ ) {
# 1409 "main.c"
( or [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = ( ov [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] )
- a0 * ( ou [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] )
# 1413 "main.c"
- a2 * ( ( u2 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) + ( u1 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 - 1 ) ] )
+ ( u1 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 + 1 ) ] ) )
- a3 * ( ( u2 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 - 1 ) ] ) + ( u2 [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 + 1 ) ] ) ) ;
}
}
}
}
}
acc_free ( u1 ) ;
acc_free ( u2 ) ;
# 1437 "main.c"
if ( timeron ) timer_stop ( 4 ) ;
# 1439 "main.c"
comm3 ( or , n1 , n2 , n3 , k ) ;
# 1445 "main.c"
if ( debug_vec [ 0 ] >= 1 ) {
rep_nrm ( or , n1 , n2 , n3 , " resid" , k ) ;
}
# 1450 "main.c"
if ( debug_vec [ 2 ] >= k ) {
showall ( or , n1 , n2 , n3 ) ;
}
}
# 1457 "main.c"
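/* rprj3: full-weighting restriction (weights 0.5/0.25/0.125/0.0625) of
   the fine-grid residual (extents m1k x m2k x m3k) onto the next coarser
   level (m1j x m2j x m3j). d1/d2/d3 shift the stencil when a dimension
   has degenerated to 3 points; x1/y1 cache edge and corner sums on the
   device. */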
static void rprj3 ( double * or , int m1k , int m2k , int m3k ,
double * os , int m1j , int m2j , int m3j , int k )
{
# 1472 "main.c"
int j3 , j2 , j1 , i3 , i2 , i1 , d1 , d2 , d3 , j ;
# 1475 "main.c"
double * x1 , * y1 , x2 , y2 ;
x1 = ( double * ) acc_malloc ( m3k * m2k * m1k * sizeof ( double ) ) ;
y1 = ( double * ) acc_malloc ( m3k * m2k * m1k * sizeof ( double ) ) ;
# 1485 "main.c"
if ( timeron ) timer_start ( 6 ) ;
if ( m1k == 3 ) {
d1 = 2 ;
} else {
d1 = 1 ;
}
# 1492 "main.c"
if ( m2k == 3 ) {
d2 = 2 ;
} else {
d2 = 1 ;
}
# 1498 "main.c"
if ( m3k == 3 ) {
d3 = 2 ;
} else {
d3 = 1 ;
}
# 1504 "main.c"
# 1504 "main.c"
#pragma acc data deviceptr ( x1 , y1 ) present ( or [ 0 : m3k * m2k * m1k ] ) present ( os [ 0 : m3j * m2j * m1j ] )
# 1507 "main.c"
{
# 1508 "main.c"
#pragma acc parallel loop gang num_gangs ( m3j - 2 ) num_workers ( 8 ) vector_length ( 128 )
for ( j3 = 1 ; j3 < m3j - 1 ; j3 ++ ) {
i3 = 2 * j3 - d3 ;
# 1511 "main.c"
#pragma acc loop worker
for ( j2 = 1 ; j2 < m2j - 1 ; j2 ++ ) {
i2 = 2 * j2 - d2 ;
# 1514 "main.c"
#pragma acc loop vector
for ( j1 = 1 ; j1 < m1j ; j1 ++ ) {
i1 = 2 * j1 - d1 ;
# 1522 "main.c"
( x1 [ ( i3 ) * m2k * m1k + ( i2 ) * m1k + ( i1 ) ] ) = ( or [ ( i3 + 1 ) * m2k * m1k + ( i2 ) * m1k + ( i1 ) ] ) + ( or [ ( i3 + 1 ) * m2k * m1k + ( i2 + 2 ) * m1k + ( i1 ) ] )
+ ( or [ ( i3 ) * m2k * m1k + ( i2 + 1 ) * m1k + ( i1 ) ] ) + ( or [ ( i3 + 2 ) * m2k * m1k + ( i2 + 1 ) * m1k + ( i1 ) ] ) ;
( y1 [ ( i3 ) * m2k * m1k + ( i2 ) * m1k + ( i1 ) ] ) = ( or [ ( i3 ) * m2k * m1k + ( i2 ) * m1k + ( i1 ) ] ) + ( or [ ( i3 + 2 ) * m2k * m1k + ( i2 ) * m1k + ( i1 ) ] )
+ ( or [ ( i3 ) * m2k * m1k + ( i2 + 2 ) * m1k + ( i1 ) ] ) + ( or [ ( i3 + 2 ) * m2k * m1k + ( i2 + 2 ) * m1k + ( i1 ) ] ) ;
}
}
}
# 1531 "main.c"
#pragma acc parallel loop gang num_gangs ( m3j - 2 ) num_workers ( 8 ) vector_length ( 128 )
for ( j3 = 1 ; j3 < m3j - 1 ; j3 ++ ) {
i3 = 2 * j3 - d3 ;
# 1534 "main.c"
#pragma acc loop worker
for ( j2 = 1 ; j2 < m2j - 1 ; j2 ++ ) {
i2 = 2 * j2 - d2 ;
# 1537 "main.c"
#pragma acc loop vector
for ( j1 = 1 ; j1 < m1j - 1 ; j1 ++ ) {
i1 = 2 * j1 - d1 ;
# 1550 "main.c"
y2 = ( or [ ( i3 ) * m2k * m1k + ( i2 ) * m1k + ( i1 + 1 ) ] ) + ( or [ ( i3 + 2 ) * m2k * m1k + ( i2 ) * m1k + ( i1 + 1 ) ] )
+ ( or [ ( i3 ) * m2k * m1k + ( i2 + 2 ) * m1k + ( i1 + 1 ) ] ) + ( or [ ( i3 + 2 ) * m2k * m1k + ( i2 + 2 ) * m1k + ( i1 + 1 ) ] ) ;
x2 = ( or [ ( i3 + 1 ) * m2k * m1k + ( i2 ) * m1k + ( i1 + 1 ) ] ) + ( or [ ( i3 + 1 ) * m2k * m1k + ( i2 + 2 ) * m1k + ( i1 + 1 ) ] )
+ ( or [ ( i3 ) * m2k * m1k + ( i2 + 1 ) * m1k + ( i1 + 1 ) ] ) + ( or [ ( i3 + 2 ) * m2k * m1k + ( i2 + 1 ) * m1k + ( i1 + 1 ) ] ) ;
( os [ ( j3 ) * m2j * m1j + ( j2 ) * m1j + ( j1 ) ] ) =
0.5 * ( or [ ( i3 + 1 ) * m2k * m1k + ( i2 + 1 ) * m1k + ( i1 + 1 ) ] )
+ 0.25 * ( ( or [ ( i3 + 1 ) * m2k * m1k + ( i2 + 1 ) * m1k + ( i1 ) ] ) + ( or [ ( i3 + 1 ) * m2k * m1k + ( i2 + 1 ) * m1k + ( i1 + 2 ) ] ) + x2 )
+ 0.125 * ( ( x1 [ ( i3 ) * m2k * m1k + ( i2 ) * m1k + ( i1 ) ] ) + ( x1 [ ( i3 ) * m2k * m1k + ( i2 ) * m1k + ( i1 + 2 ) ] ) + y2 )
+ 0.0625 * ( ( y1 [ ( i3 ) * m2k * m1k + ( i2 ) * m1k + ( i1 ) ] ) + ( y1 [ ( i3 ) * m2k * m1k + ( i2 ) * m1k + ( i1 + 2 ) ] ) ) ;
}
}
}
}
if ( timeron ) timer_stop ( 6 ) ;
# 1566 "main.c"
j = k - 1 ;
comm3 ( os , m1j , m2j , m3j , j ) ;
# 1570 "main.c"
if ( debug_vec [ 0 ] >= 1 ) {
rep_nrm ( os , m1j , m2j , m3j , " rprj3" , k - 1 ) ;
}
# 1575 "main.c"
if ( debug_vec [ 4 ] >= k ) {
showall ( os , m1j , m2j , m3j ) ;
}
acc_free ( x1 ) ;
acc_free ( y1 ) ;
# 1586 "main.c"
}
# 1589 "main.c"
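/* interp: trilinear prolongation, adding the coarse-grid correction oz
   (mm1 x mm2 x mm3) into the finer grid ou (n1 x n2 x n3). The first
   branch is the common case where no dimension equals 3; the else branch
   handles degenerate grids via the d/t offsets. z1/z2/z3 hold reusable
   edge sums on the device. */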
static void interp ( double * oz , int mm1 , int mm2 , int mm3 ,
double * ou , int n1 , int n2 , int n3 , int k )
{
# 1605 "main.c"
int i3 , i2 , i1 , d1 , d2 , d3 , t1 , t2 , t3 ;
# 1608 "main.c"
static double * z1 , * z2 , * z3 ;
# 1615 "main.c"
z1 = ( double * ) acc_malloc ( mm3 * mm2 * mm1 * sizeof ( double ) ) ;
z2 = ( double * ) acc_malloc ( mm3 * mm2 * mm1 * sizeof ( double ) ) ;
z3 = ( double * ) acc_malloc ( mm3 * mm2 * mm1 * sizeof ( double ) ) ;
# 1628 "main.c"
if ( timeron ) timer_start ( 7 ) ;
# 1630 "main.c"
# 1630 "main.c"
#pragma acc data deviceptr ( z1 , z2 , z3 ) present ( oz [ 0 : mm3 * mm2 * mm1 ] ) present ( ou [ 0 : n3 * n2 * n1 ] )
# 1633 "main.c"
{
# 1635 "main.c"
if ( n1 != 3 && n2 != 3 && n3 != 3 ) {
# 1637 "main.c"
# 1637 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 0 ; i3 < mm3 - 1 ; i3 ++ ) {
# 1639 "main.c"
#pragma acc loop worker
for ( i2 = 0 ; i2 < mm2 - 1 ; i2 ++ ) {
# 1641 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < mm1 ; i1 ++ ) {
# 1647 "main.c"
( z1 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) = ( oz [ ( i3 ) * mm2 * mm1 + ( i2 + 1 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) ;
( z2 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) = ( oz [ ( i3 + 1 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) ;
( z3 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) = ( oz [ ( i3 + 1 ) * mm2 * mm1 + ( i2 + 1 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 + 1 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] )
+ ( z1 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) ;
# 1656 "main.c"
}
}
}
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 0 ; i3 < mm3 - 1 ; i3 ++ ) {
# 1661 "main.c"
#pragma acc loop worker
for ( i2 = 0 ; i2 < mm2 - 1 ; i2 ++ ) {
# 1663 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < mm1 - 1 ; i1 ++ ) {
# 1670 "main.c"
( ou [ ( 2 * i3 ) * n2 * n1 + ( 2 * i2 ) * n1 + ( 2 * i1 ) ] ) = ( ou [ ( 2 * i3 ) * n2 * n1 + ( 2 * i2 ) * n1 + ( 2 * i1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) ;
( ou [ ( 2 * i3 ) * n2 * n1 + ( 2 * i2 ) * n1 + ( 2 * i1 + 1 ) ] ) = ( ou [ ( 2 * i3 ) * n2 * n1 + ( 2 * i2 ) * n1 + ( 2 * i1 + 1 ) ] )
+ 0.5 * ( ( oz [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 + 1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) ) ;
}
}
}
# 1680 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 0 ; i3 < mm3 - 1 ; i3 ++ ) {
# 1682 "main.c"
#pragma acc loop worker
for ( i2 = 0 ; i2 < mm2 - 1 ; i2 ++ ) {
# 1684 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < mm1 - 1 ; i1 ++ ) {
# 1691 "main.c"
( ou [ ( 2 * i3 ) * n2 * n1 + ( 2 * i2 + 1 ) * n1 + ( 2 * i1 ) ] ) = ( ou [ ( 2 * i3 ) * n2 * n1 + ( 2 * i2 + 1 ) * n1 + ( 2 * i1 ) ] )
+ 0.5 * ( z1 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) ;
( ou [ ( 2 * i3 ) * n2 * n1 + ( 2 * i2 + 1 ) * n1 + ( 2 * i1 + 1 ) ] ) = ( ou [ ( 2 * i3 ) * n2 * n1 + ( 2 * i2 + 1 ) * n1 + ( 2 * i1 + 1 ) ] )
+ 0.25 * ( ( z1 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] )
+ ( z1 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 + 1 ) ] ) ) ;
}
}
}
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 0 ; i3 < mm3 - 1 ; i3 ++ ) {
# 1702 "main.c"
#pragma acc loop worker
for ( i2 = 0 ; i2 < mm2 - 1 ; i2 ++ ) {
# 1704 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < mm1 - 1 ; i1 ++ ) {
# 1711 "main.c"
( ou [ ( 2 * i3 + 1 ) * n2 * n1 + ( 2 * i2 ) * n1 + ( 2 * i1 ) ] ) = ( ou [ ( 2 * i3 + 1 ) * n2 * n1 + ( 2 * i2 ) * n1 + ( 2 * i1 ) ] )
+ 0.5 * ( z2 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) ;
( ou [ ( 2 * i3 + 1 ) * n2 * n1 + ( 2 * i2 ) * n1 + ( 2 * i1 + 1 ) ] ) = ( ou [ ( 2 * i3 + 1 ) * n2 * n1 + ( 2 * i2 ) * n1 + ( 2 * i1 + 1 ) ] )
+ 0.25 * ( ( z2 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] )
+ ( z2 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 + 1 ) ] ) ) ;
}
}
}
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 0 ; i3 < mm3 - 1 ; i3 ++ ) {
# 1723 "main.c"
#pragma acc loop worker
for ( i2 = 0 ; i2 < mm2 - 1 ; i2 ++ ) {
# 1725 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < mm1 - 1 ; i1 ++ ) {
# 1732 "main.c"
( ou [ ( 2 * i3 + 1 ) * n2 * n1 + ( 2 * i2 + 1 ) * n1 + ( 2 * i1 ) ] ) = ( ou [ ( 2 * i3 + 1 ) * n2 * n1 + ( 2 * i2 + 1 ) * n1 + ( 2 * i1 ) ] )
+ 0.25 * ( z3 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] ) ;
( ou [ ( 2 * i3 + 1 ) * n2 * n1 + ( 2 * i2 + 1 ) * n1 + ( 2 * i1 + 1 ) ] ) = ( ou [ ( 2 * i3 + 1 ) * n2 * n1 + ( 2 * i2 + 1 ) * n1 + ( 2 * i1 + 1 ) ] )
+ 0.125 * ( ( z3 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] )
+ ( z3 [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 + 1 ) ] ) ) ;
}
}
}
} else {
if ( n1 == 3 ) {
d1 = 2 ;
t1 = 1 ;
} else {
d1 = 1 ;
t1 = 0 ;
}
# 1750 "main.c"
if ( n2 == 3 ) {
d2 = 2 ;
t2 = 1 ;
} else {
d2 = 1 ;
t2 = 0 ;
}
# 1758 "main.c"
if ( n3 == 3 ) {
d3 = 2 ;
t3 = 1 ;
} else {
d3 = 1 ;
t3 = 0 ;
}
# 1766 "main.c"
# 1766 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - d3 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = d3 ; i3 <= mm3 - 1 ; i3 ++ ) {
# 1768 "main.c"
#pragma acc loop worker
for ( i2 = d2 ; i2 <= mm2 - 1 ; i2 ++ ) {
# 1770 "main.c"
#pragma acc loop vector
for ( i1 = d1 ; i1 <= mm1 - 1 ; i1 ++ ) {
# 1776 "main.c"
( ou [ ( 2 * i3 - d3 - 1 ) * n2 * n1 + ( 2 * i2 - d2 - 1 ) * n1 + ( 2 * i1 - d1 - 1 ) ] ) =
( ou [ ( 2 * i3 - d3 - 1 ) * n2 * n1 + ( 2 * i2 - d2 - 1 ) * n1 + ( 2 * i1 - d1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] ) ;
}
}
}
# 1783 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - d3 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = d3 ; i3 <= mm3 - 1 ; i3 ++ ) {
# 1785 "main.c"
#pragma acc loop worker
for ( i2 = d2 ; i2 <= mm2 - 1 ; i2 ++ ) {
# 1787 "main.c"
#pragma acc loop vector
for ( i1 = 1 ; i1 <= mm1 - 1 ; i1 ++ ) {
# 1793 "main.c"
( ou [ ( 2 * i3 - d3 - 1 ) * n2 * n1 + ( 2 * i2 - d2 - 1 ) * n1 + ( 2 * i1 - t1 - 1 ) ] ) =
( ou [ ( 2 * i3 - d3 - 1 ) * n2 * n1 + ( 2 * i2 - d2 - 1 ) * n1 + ( 2 * i1 - t1 - 1 ) ] )
+ 0.5 * ( ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] ) ) ;
}
}
}
# 1801 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - d3 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = d3 ; i3 <= mm3 - 1 ; i3 ++ ) {
# 1803 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 <= mm2 - 1 ; i2 ++ ) {
# 1805 "main.c"
#pragma acc loop vector
for ( i1 = d1 ; i1 <= mm1 - 1 ; i1 ++ ) {
# 1811 "main.c"
( ou [ ( 2 * i3 - d3 - 1 ) * n2 * n1 + ( 2 * i2 - t2 - 1 ) * n1 + ( 2 * i1 - d1 - 1 ) ] ) =
( ou [ ( 2 * i3 - d3 - 1 ) * n2 * n1 + ( 2 * i2 - t2 - 1 ) * n1 + ( 2 * i1 - d1 - 1 ) ] )
+ 0.5 * ( ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] ) ) ;
}
}
}
# 1819 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - d3 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = d3 ; i3 <= mm3 - 1 ; i3 ++ ) {
# 1821 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 <= mm2 - 1 ; i2 ++ ) {
# 1823 "main.c"
#pragma acc loop vector
for ( i1 = 1 ; i1 <= mm1 - 1 ; i1 ++ ) {
# 1830 "main.c"
( ou [ ( 2 * i3 - d3 - 1 ) * n2 * n1 + ( 2 * i2 - t2 - 1 ) * n1 + ( 2 * i1 - t1 - 1 ) ] ) =
( ou [ ( 2 * i3 - d3 - 1 ) * n2 * n1 + ( 2 * i2 - t2 - 1 ) * n1 + ( 2 * i1 - t1 - 1 ) ] )
+ 0.25 * ( ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] ) ) ;
}
}
}
# 1841 "main.c"
# 1841 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 1 ; i3 <= mm3 - 1 ; i3 ++ ) {
# 1843 "main.c"
#pragma acc loop worker
for ( i2 = d2 ; i2 <= mm2 - 1 ; i2 ++ ) {
# 1845 "main.c"
#pragma acc loop vector
for ( i1 = d1 ; i1 <= mm1 - 1 ; i1 ++ ) {
# 1851 "main.c"
( ou [ ( 2 * i3 - t3 - 1 ) * n2 * n1 + ( 2 * i2 - d2 - 1 ) * n1 + ( 2 * i1 - d1 - 1 ) ] ) =
( ou [ ( 2 * i3 - t3 - 1 ) * n2 * n1 + ( 2 * i2 - d2 - 1 ) * n1 + ( 2 * i1 - d1 - 1 ) ] )
+ 0.5 * ( ( oz [ ( i3 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] ) ) ;
}
}
}
# 1859 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 1 ; i3 <= mm3 - 1 ; i3 ++ ) {
# 1861 "main.c"
#pragma acc loop worker
for ( i2 = d2 ; i2 <= mm2 - 1 ; i2 ++ ) {
# 1863 "main.c"
#pragma acc loop vector
for ( i1 = 1 ; i1 <= mm1 - 1 ; i1 ++ ) {
# 1870 "main.c"
( ou [ ( 2 * i3 - t3 - 1 ) * n2 * n1 + ( 2 * i2 - d2 - 1 ) * n1 + ( 2 * i1 - t1 - 1 ) ] ) =
( ou [ ( 2 * i3 - t3 - 1 ) * n2 * n1 + ( 2 * i2 - d2 - 1 ) * n1 + ( 2 * i1 - t1 - 1 ) ] )
+ 0.25 * ( ( oz [ ( i3 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] ) ) ;
}
}
}
# 1880 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 1 ; i3 <= mm3 - 1 ; i3 ++ ) {
# 1882 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 <= mm2 - 1 ; i2 ++ ) {
# 1884 "main.c"
#pragma acc loop vector
for ( i1 = d1 ; i1 <= mm1 - 1 ; i1 ++ ) {
# 1891 "main.c"
( ou [ ( 2 * i3 - t3 - 1 ) * n2 * n1 + ( 2 * i2 - t2 - 1 ) * n1 + ( 2 * i1 - d1 - 1 ) ] ) =
( ou [ ( 2 * i3 - t3 - 1 ) * n2 * n1 + ( 2 * i2 - t2 - 1 ) * n1 + ( 2 * i1 - d1 - 1 ) ] )
+ 0.25 * ( ( oz [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] ) ) ;
}
}
}
# 1901 "main.c"
#pragma acc parallel loop gang num_gangs ( mm3 - 1 ) num_workers ( 8 ) vector_length ( 128 )
for ( i3 = 1 ; i3 <= mm3 - 1 ; i3 ++ ) {
# 1903 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 <= mm2 - 1 ; i2 ++ ) {
# 1905 "main.c"
#pragma acc loop vector
for ( i1 = 1 ; i1 <= mm1 - 1 ; i1 ++ ) {
# 1914 "main.c"
( ou [ ( 2 * i3 - t3 - 1 ) * n2 * n1 + ( 2 * i2 - t2 - 1 ) * n1 + ( 2 * i1 - t1 - 1 ) ] ) =
( ou [ ( 2 * i3 - t3 - 1 ) * n2 * n1 + ( 2 * i2 - t2 - 1 ) * n1 + ( 2 * i1 - t1 - 1 ) ] )
+ 0.125 * ( ( oz [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 ) * mm1 + ( i1 - 1 ) ] )
+ ( oz [ ( i3 - 1 ) * mm2 * mm1 + ( i2 - 1 ) * mm1 + ( i1 - 1 ) ] ) ) ;
}
}
}
# 1929 "main.c"
}
}
# 1932 "main.c"
acc_free ( z1 ) ;
acc_free ( z2 ) ;
acc_free ( z3 ) ;
# 1941 "main.c"
if ( timeron ) timer_stop ( 7 ) ;
# 1943 "main.c"
if ( debug_vec [ 0 ] >= 1 ) {
rep_nrm ( oz , mm1 , mm2 , mm3 , "z: inter" , k - 1 ) ;
rep_nrm ( ou , n1 , n2 , n3 , "u: inter" , k ) ;
}
# 1950 "main.c"
if ( debug_vec [ 5 ] >= k ) {
showall ( oz , mm1 , mm2 , mm3 ) ;
showall ( ou , n1 , n2 , n3 ) ;
}
}
# 1959 "main.c"
static void norm2u3 ( double * or , int n1 , int n2 , int n3 ,
double * rnm2 , double * rnmu ,
int nx , int ny , int nz )
{
# 1972 "main.c"
double s , a ;
double temp ;
int i3 , i2 , i1 ;
# 1976 "main.c"
double dn ;
# 1978 "main.c"
if ( timeron ) timer_start ( 8 ) ;
dn = 1.0 * nx * ny * nz ;
# 1981 "main.c"
s = 0.0 ;
* rnmu = 0.0 ;
temp = * rnmu ;
#pragma acc data pcopyin ( or [ 0 : n3 * n2 * n1 ] )
{
# 1986 "main.c"
#pragma acc parallel loop gang reduction ( + : s ) reduction ( max : temp ) num_gangs ( n3 - 2 ) num_workers ( 8 ) vector_length ( 128 )
# 1988 "main.c"
for ( i3 = 1 ; i3 < n3 - 1 ; i3 ++ ) {
# 1989 "main.c"
#pragma acc loop worker
for ( i2 = 1 ; i2 < n2 - 1 ; i2 ++ ) {
# 1991 "main.c"
#pragma acc loop vector
for ( i1 = 1 ; i1 < n1 - 1 ; i1 ++ ) {
s = s + __builtin_pow ( ( or [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) , 2.0 ) ;
a = __builtin_fabs ( ( or [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) ) ;
temp = ( ( temp ) > ( a ) ? ( temp ) : ( a ) ) ;
}
}
}
}
* rnmu = temp ;
* rnm2 = __builtin_sqrt ( s / dn ) ;
if ( timeron ) timer_stop ( 8 ) ;
}
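/* In effect norm2u3 computes, over the interior points of or[]:
     *rnm2 = sqrt( (1 / (nx*ny*nz)) * sum or(i1,i2,i3)^2 )  -- approximate L2 norm
     *rnmu = max | or(i1,i2,i3) |                           -- uniform (max) norm
   with both reductions fused into one OpenACC parallel nest via
   reduction(+:s) and reduction(max:temp). */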
# 2009 "main.c"
static void rep_nrm ( double * u , int n1 , int n2 , int n3 , char * title , int kk )
{
double rnm2 , rnmu ;
# 2016 "main.c"
norm2u3 ( u , n1 , n2 , n3 , & rnm2 , & rnmu , nx [ kk ] , ny [ kk ] , nz [ kk ] ) ;
printf ( " Level%2d in %8s: norms =%21.14E%21.14E\n" , kk , title , rnm2 , rnmu ) ;
}
# 2021 "main.c"
static void comm3 ( double * ou , int n1 , int n2 , int n3 , int kk )
{
# 2028 "main.c"
int i1 , i2 , i3 ;
if ( timeron ) timer_start ( 9 ) ;
# 2030 "main.c"
#pragma acc data present ( ou [ 0 : n3 * n2 * n1 ] )
{
# 2032 "main.c"
#pragma acc parallel loop gang num_gangs ( n3 - 2 ) vector_length ( 128 )
for ( i3 = 1 ; i3 < n3 - 1 ; i3 ++ ) {
# 2034 "main.c"
#pragma acc loop vector
for ( i2 = 1 ; i2 < n2 - 1 ; i2 ++ ) {
# 2039 "main.c"
( ou [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( 0 ) ] ) = ( ou [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( n1 - 2 ) ] ) ;
( ou [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( n1 - 1 ) ] ) = ( ou [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( 1 ) ] ) ;
}
}
# 2045 "main.c"
#pragma acc parallel loop gang num_gangs ( n3 - 2 ) vector_length ( 128 )
for ( i3 = 1 ; i3 < n3 - 1 ; i3 ++ ) {
# 2047 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < n1 ; i1 ++ ) {
# 2052 "main.c"
( ou [ ( i3 ) * n2 * n1 + ( 0 ) * n1 + ( i1 ) ] ) = ( ou [ ( i3 ) * n2 * n1 + ( n2 - 2 ) * n1 + ( i1 ) ] ) ;
( ou [ ( i3 ) * n2 * n1 + ( n2 - 1 ) * n1 + ( i1 ) ] ) = ( ou [ ( i3 ) * n2 * n1 + ( 1 ) * n1 + ( i1 ) ] ) ;
}
}
# 2058 "main.c"
#pragma acc parallel loop gang num_gangs ( n2 ) vector_length ( 128 )
for ( i2 = 0 ; i2 < n2 ; i2 ++ ) {
# 2060 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < n1 ; i1 ++ ) {
# 2065 "main.c"
( ou [ ( 0 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = ( ou [ ( n3 - 2 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) ;
( ou [ ( n3 - 1 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = ( ou [ ( 1 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) ;
}
}
}
if ( timeron ) timer_stop ( 9 ) ;
}
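/* comm3 enforces the periodic boundary condition one axis at a time:
   along each axis the ghost plane at index 0 receives a copy of the last
   interior plane (n-2) and the ghost plane at n-1 receives the first
   interior plane (1), so stencil accesses at the faces wrap around. */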
# 2075 "main.c"
inline double randlc ( double * x , double a )
{
const double r23 = 1.1920928955078125e-07 ;
const double r46 = r23 * r23 ;
const double t23 = 8.388608e+06 ;
const double t46 = t23 * t23 ;
# 2082 "main.c"
double t1 , t2 , t3 , t4 , a1 , a2 , x1 , x2 , z ;
double r ;
t1 = r23 * a ;
a1 = ( int ) t1 ;
a2 = a - t23 * a1 ;
t1 = r23 * ( * x ) ;
x1 = ( int ) t1 ;
x2 = * x - t23 * x1 ;
t1 = a1 * x2 + a2 * x1 ;
t2 = ( int ) ( r23 * t1 ) ;
z = t1 - t23 * t2 ;
t3 = t23 * z + a2 * x2 ;
t4 = ( int ) ( r46 * t3 ) ;
* x = t3 - t46 * t4 ;
r = r46 * ( * x ) ;
return r ;
}
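/* A minimal usage sketch (illustrative only; the helper below and its
   name are not part of the benchmark).  randlc implements the NAS linear
   congruential generator
       x_{k+1} = a * x_k  (mod 2^46),   r_k = x_k * 2^-46,
   with the operands split into 23-bit halves (r23/t23) so every 46-bit
   product stays exact in double precision. */
static double example_draw_uniform ( void )
{
  static double seed = 314159265.0 ;          /* default NAS seed, as in zran3 */
  return randlc ( & seed , 1220703125.0 ) ;   /* a = 5^13, as in zran3 */
}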
# 2102 "main.c"
inline void vranlc ( int n , double * x , double a , double y [ ] )
{
const double r23 = 1.1920928955078125e-07 ;
const double r46 = r23 * r23 ;
const double t23 = 8.388608e+06 ;
const double t46 = t23 * t23 ;
# 2110 "main.c"
double t1 , t2 , t3 , t4 , a1 , a2 , x1 , x2 , z ;
# 2112 "main.c"
int i ;
t1 = r23 * a ;
a1 = ( int ) t1 ;
a2 = a - t23 * a1 ;
for ( i = 0 ; i < n ; i ++ ) {
t1 = r23 * ( * x ) ;
x1 = ( int ) t1 ;
x2 = * x - t23 * x1 ;
t1 = a1 * x2 + a2 * x1 ;
t2 = ( int ) ( r23 * t1 ) ;
z = t1 - t23 * t2 ;
t3 = t23 * z + a2 * x2 ;
t4 = ( int ) ( r46 * t3 ) ;
* x = t3 - t46 * t4 ;
y [ i ] = r46 * ( * x ) ;
}
}
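/* vranlc is the batched form of randlc: a single call writes the next n
   terms of the same sequence into y[0..n-1] and leaves *x advanced by n
   steps, which lets zran3 fill a whole grid line per call. */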
# 2130 "main.c"
static void zran3 ( double * oz , int n1 , int n2 , int n3 , int nx , int ny , int k )
{
# 2139 "main.c"
int i0 , m0 , m1 ;
# 2141 "main.c"
int i1 , i2 , i3 , d1 , e1 , e2 , e3 ;
double xx , x0 , x1 , a1 , a2 , ai ;
# 2144 "main.c"
const int mm = 10 ;
const double a = __builtin_pow ( 5.0 , 13.0 ) ;
const double x = 314159265.0 ;
double ten [ mm ] [ 2 ] , best ;
int i , j1 [ mm ] [ 2 ] , j2 [ mm ] [ 2 ] , j3 [ mm ] [ 2 ] ;
int jg [ 4 ] [ mm ] [ 2 ] ;
# 2151 "main.c"
double rdummy ;
# 2153 "main.c"
a1 = power ( a , nx ) ;
a2 = power ( a , nx * ny ) ;
# 2156 "main.c"
zero3 ( oz , n1 , n2 , n3 ) ;
i = is1 - 2 + nx * ( is2 - 2 + ny * ( is3 - 2 ) ) ;
# 2161 "main.c"
ai = power ( a , i ) ;
d1 = ie1 - is1 + 1 ;
e1 = ie1 - is1 + 2 ;
e2 = ie2 - is2 + 2 ;
e3 = ie3 - is3 + 2 ;
x0 = x ;
rdummy = randlc ( & x0 , ai ) ;
# 2169 "main.c"
for ( i3 = 1 ; i3 < e3 ; i3 ++ ) {
x1 = x0 ;
for ( i2 = 1 ; i2 < e2 ; i2 ++ ) {
xx = x1 ;
vranlc ( d1 , & xx , a , & ( ( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( 1 ) ] ) ) ) ;
rdummy = randlc ( & x1 , a1 ) ;
}
rdummy = randlc ( & x0 , a2 ) ;
}
# 2180 "main.c"
# 2185 "main.c"
for ( i = 0 ; i < mm ; i ++ ) {
ten [ i ] [ 1 ] = 0.0 ;
j1 [ i ] [ 1 ] = 0 ;
j2 [ i ] [ 1 ] = 0 ;
j3 [ i ] [ 1 ] = 0 ;
ten [ i ] [ 0 ] = 1.0 ;
j1 [ i ] [ 0 ] = 0 ;
j2 [ i ] [ 0 ] = 0 ;
j3 [ i ] [ 0 ] = 0 ;
}
for ( i3 = 1 ; i3 < n3 - 1 ; i3 ++ ) {
for ( i2 = 1 ; i2 < n2 - 1 ; i2 ++ ) {
for ( i1 = 1 ; i1 < n1 - 1 ; i1 ++ ) {
if ( ( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) > ten [ 0 ] [ 1 ] ) {
ten [ 0 ] [ 1 ] = ( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) ;
j1 [ 0 ] [ 1 ] = i1 ;
j2 [ 0 ] [ 1 ] = i2 ;
j3 [ 0 ] [ 1 ] = i3 ;
bubble ( ten , j1 , j2 , j3 , mm , 1 ) ;
}
if ( ( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) < ten [ 0 ] [ 0 ] ) {
ten [ 0 ] [ 0 ] = ( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) ;
j1 [ 0 ] [ 0 ] = i1 ;
j2 [ 0 ] [ 0 ] = i2 ;
j3 [ 0 ] [ 0 ] = i3 ;
bubble ( ten , j1 , j2 , j3 , mm , 0 ) ;
}
}
}
}
# 2225 "main.c"
i1 = mm - 1 ;
i0 = mm - 1 ;
for ( i = mm - 1 ; i >= 0 ; i -- ) {
best = 0.0 ;
if ( best < ten [ i1 ] [ 1 ] ) {
jg [ 0 ] [ i ] [ 1 ] = 0 ;
jg [ 1 ] [ i ] [ 1 ] = is1 - 2 + j1 [ i1 ] [ 1 ] ;
jg [ 2 ] [ i ] [ 1 ] = is2 - 2 + j2 [ i1 ] [ 1 ] ;
jg [ 3 ] [ i ] [ 1 ] = is3 - 2 + j3 [ i1 ] [ 1 ] ;
i1 = i1 - 1 ;
} else {
jg [ 0 ] [ i ] [ 1 ] = 0 ;
jg [ 1 ] [ i ] [ 1 ] = 0 ;
jg [ 2 ] [ i ] [ 1 ] = 0 ;
jg [ 3 ] [ i ] [ 1 ] = 0 ;
}
# 2245 "main.c"
best = 1.0 ;
if ( best > ten [ i0 ] [ 0 ] ) {
jg [ 0 ] [ i ] [ 0 ] = 0 ;
jg [ 1 ] [ i ] [ 0 ] = is1 - 2 + j1 [ i0 ] [ 0 ] ;
jg [ 2 ] [ i ] [ 0 ] = is2 - 2 + j2 [ i0 ] [ 0 ] ;
jg [ 3 ] [ i ] [ 0 ] = is3 - 2 + j3 [ i0 ] [ 0 ] ;
i0 = i0 - 1 ;
} else {
jg [ 0 ] [ i ] [ 0 ] = 0 ;
jg [ 1 ] [ i ] [ 0 ] = 0 ;
jg [ 2 ] [ i ] [ 0 ] = 0 ;
jg [ 3 ] [ i ] [ 0 ] = 0 ;
}
# 2259 "main.c"
}
m1 = 0 ;
m0 = 0 ;
# 2265 "main.c"
# 2308 "main.c"
# 2310 "main.c"
for ( i3 = 0 ; i3 < n3 ; i3 ++ ) {
for ( i2 = 0 ; i2 < n2 ; i2 ++ ) {
for ( i1 = 0 ; i1 < n1 ; i1 ++ ) {
( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = 0.0 ;
}
}
}
for ( i = mm - 1 ; i >= m0 ; i -- ) {
i3 = jg [ 3 ] [ i ] [ 0 ] ;
i2 = jg [ 2 ] [ i ] [ 0 ] ;
i1 = jg [ 1 ] [ i ] [ 0 ] ;
( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = - 1.0 ;
}
# 2327 "main.c"
for ( i = mm - 1 ; i >= m1 ; i -- ) {
i3 = jg [ 3 ] [ i ] [ 1 ] ;
i2 = jg [ 2 ] [ i ] [ 1 ] ;
i1 = jg [ 1 ] [ i ] [ 1 ] ;
( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = + 1.0 ;
}
# 2334 "main.c"
#pragma acc update device ( oz [ 0 : n3 * n2 * n1 ] )
# 2336 "main.c"
comm3 ( oz , n1 , n2 , n3 , k ) ;
# 2339 "main.c"
}
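/* Net effect of zran3: after locating the mm largest and mm smallest
   pseudorandom values in the interior, the grid is re-zeroed and seeded
   with +1.0 charges at the former positions and -1.0 at the latter,
   then pushed to the device and wrapped by comm3. */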
# 2345 "main.c"
static void showall ( double * oz , int n1 , int n2 , int n3 )
{
# 2349 "main.c"
int i1 , i2 , i3 ;
int m1 , m2 , m3 ;
# 2352 "main.c"
m1 = ( ( n1 ) < ( 18 ) ? ( n1 ) : ( 18 ) ) ;
m2 = ( ( n2 ) < ( 14 ) ? ( n2 ) : ( 14 ) ) ;
m3 = ( ( n3 ) < ( 18 ) ? ( n3 ) : ( 18 ) ) ;
# 2356 "main.c"
printf ( " \n" ) ;
for ( i3 = 0 ; i3 < m3 ; i3 ++ ) {
for ( i1 = 0 ; i1 < m1 ; i1 ++ ) {
for ( i2 = 0 ; i2 < m2 ; i2 ++ ) {
printf ( "%6.3f" , ( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i2 ) ] ) ) ;
}
printf ( "\n" ) ;
}
printf ( " - - - - - - - \n" ) ;
}
printf ( " \n" ) ;
}
# 2371 "main.c"
static double power ( double a , int n )
{
double aj ;
int nj ;
double rdummy ;
double power ;
# 2382 "main.c"
power = 1.0 ;
nj = n ;
aj = a ;
# 2386 "main.c"
while ( nj != 0 ) {
if ( ( nj % 2 ) == 1 ) rdummy = randlc ( & power , aj ) ;
rdummy = randlc ( & aj , aj ) ;
nj = nj / 2 ;
}
# 2392 "main.c"
return power ;
}
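/* power(a,n) is binary exponentiation in the generator's arithmetic:
   randlc(&power, aj) acts as the 46-bit modular multiply and
   randlc(&aj, aj) squares aj, so the result is a^n mod 2^46 -- used by
   zran3 to jump the random sequence ahead by n draws in O(log n) steps.
   E.g. n = 13 = 0b1101 multiplies aj in at bit positions 0, 2 and 3. */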
# 2396 "main.c"
static void bubble ( double ten [ ] [ 2 ] , int j1 [ ] [ 2 ] , int j2 [ ] [ 2 ] , int j3 [ ] [ 2 ] ,
int m , int ind )
{
double temp ;
int i , j_temp ;
# 2405 "main.c"
if ( ind == 1 ) {
for ( i = 0 ; i < m - 1 ; i ++ ) {
if ( ten [ i ] [ ind ] > ten [ i + 1 ] [ ind ] ) {
temp = ten [ i + 1 ] [ ind ] ;
ten [ i + 1 ] [ ind ] = ten [ i ] [ ind ] ;
ten [ i ] [ ind ] = temp ;
# 2412 "main.c"
j_temp = j1 [ i + 1 ] [ ind ] ;
j1 [ i + 1 ] [ ind ] = j1 [ i ] [ ind ] ;
j1 [ i ] [ ind ] = j_temp ;
# 2416 "main.c"
j_temp = j2 [ i + 1 ] [ ind ] ;
j2 [ i + 1 ] [ ind ] = j2 [ i ] [ ind ] ;
j2 [ i ] [ ind ] = j_temp ;
# 2420 "main.c"
j_temp = j3 [ i + 1 ] [ ind ] ;
j3 [ i + 1 ] [ ind ] = j3 [ i ] [ ind ] ;
j3 [ i ] [ ind ] = j_temp ;
} else {
return ;
}
}
} else {
for ( i = 0 ; i < m - 1 ; i ++ ) {
if ( ten [ i ] [ ind ] < ten [ i + 1 ] [ ind ] ) {
# 2431 "main.c"
temp = ten [ i + 1 ] [ ind ] ;
ten [ i + 1 ] [ ind ] = ten [ i ] [ ind ] ;
ten [ i ] [ ind ] = temp ;
# 2435 "main.c"
j_temp = j1 [ i + 1 ] [ ind ] ;
j1 [ i + 1 ] [ ind ] = j1 [ i ] [ ind ] ;
j1 [ i ] [ ind ] = j_temp ;
# 2439 "main.c"
j_temp = j2 [ i + 1 ] [ ind ] ;
j2 [ i + 1 ] [ ind ] = j2 [ i ] [ ind ] ;
j2 [ i ] [ ind ] = j_temp ;
# 2443 "main.c"
j_temp = j3 [ i + 1 ] [ ind ] ;
j3 [ i + 1 ] [ ind ] = j3 [ i ] [ ind ] ;
j3 [ i ] [ ind ] = j_temp ;
} else {
return ;
}
}
}
}
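/* bubble performs a single insertion pass rather than a full sort: zran3
   only ever overwrites ten[0][ind], so one sweep that sinks the new entry
   until the order property holds again (returning early once it does) is
   enough to keep ten[] sorted -- ascending for the largest values
   (ind == 1), descending for the smallest (ind == 0). */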
# 2454 "main.c"
static void zero3 ( double * oz , int n1 , int n2 , int n3 )
{
# 2458 "main.c"
int i1 , i2 , i3 ;
# 2460 "main.c"
#pragma acc parallel present ( oz [ 0 : n3 * n2 * n1 ] ) num_gangs ( n3 ) num_workers ( 8 ) vector_length ( 128 )
{
# 2462 "main.c"
#pragma acc loop gang
for ( i3 = 0 ; i3 < n3 ; i3 ++ ) {
# 2464 "main.c"
#pragma acc loop worker
for ( i2 = 0 ; i2 < n2 ; i2 ++ ) {
# 2466 "main.c"
#pragma acc loop vector
for ( i1 = 0 ; i1 < n1 ; i1 ++ ) {
( oz [ ( i3 ) * n2 * n1 + ( i2 ) * n1 + ( i1 ) ] ) = 0.0 ;
}
}
}
}
}
|
omp-expand.c | /* Expansion pass for OMP directives. Outlines regions of certain OMP
directives to separate functions, converts others into explicit calls to the
runtime library (libgomp) and so forth.
Copyright (C) 2005-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "internal-fn.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "splay-tree.h"
#include "cfgloop.h"
#include "omp-general.h"
#include "omp-offload.h"
#include "tree-cfgcleanup.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "stringpool.h"
#include "attribs.h"
#include "tree-eh.h"
/* OMP region information. Every parallel and workshare
directive is enclosed between two markers, the OMP_* directive
and a corresponding GIMPLE_OMP_RETURN statement. */
struct omp_region
{
/* The enclosing region. */
struct omp_region *outer;
/* First child region. */
struct omp_region *inner;
/* Next peer region. */
struct omp_region *next;
/* Block containing the omp directive as its last stmt. */
basic_block entry;
/* Block containing the GIMPLE_OMP_RETURN as its last stmt. */
basic_block exit;
/* Block containing the GIMPLE_OMP_CONTINUE as its last stmt. */
basic_block cont;
/* If this is a combined parallel+workshare region, this is a list
of additional arguments needed by the combined parallel+workshare
library call. */
vec<tree, va_gc> *ws_args;
/* The code for the omp directive of this region. */
enum gimple_code type;
/* Schedule kind, only used for GIMPLE_OMP_FOR type regions. */
enum omp_clause_schedule_kind sched_kind;
/* Schedule modifiers. */
unsigned char sched_modifiers;
/* True if this is a combined parallel+workshare region. */
bool is_combined_parallel;
/* Copy of fd.lastprivate_conditional != 0. */
bool has_lastprivate_conditional;
/* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has
a depend clause. */
gomp_ordered *ord_stmt;
};
static struct omp_region *root_omp_region;
static bool omp_any_child_fn_dumped;
static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree,
bool = false);
static gphi *find_phi_with_arg_on_edge (tree, edge);
static void expand_omp (struct omp_region *region);
/* Return true if REGION is a combined parallel+workshare region. */
static inline bool
is_combined_parallel (struct omp_region *region)
{
return region->is_combined_parallel;
}
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
is the immediate dominator of PAR_ENTRY_BB, return true if there
are no data dependencies that would prevent expanding the parallel
directive at PAR_ENTRY_BB as a combined parallel+workshare region.
When expanding a combined parallel+workshare region, the call to
the child function may need additional arguments in the case of
GIMPLE_OMP_FOR regions. In some cases, these arguments are
computed out of variables passed in from the parent to the child
via 'struct .omp_data_s'. For instance:
#pragma omp parallel for schedule (guided, i * 4)
for (j ...)
Is lowered into:
# BLOCK 2 (PAR_ENTRY_BB)
.omp_data_o.i = i;
#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
# BLOCK 3 (WS_ENTRY_BB)
.omp_data_i = &.omp_data_o;
D.1667 = .omp_data_i->i;
D.1598 = D.1667 * 4;
#pragma omp for schedule (guided, D.1598)
When we outline the parallel region, the call to the child function
'bar.omp_fn.0' will need the value D.1598 in its argument list, but
that value is computed *after* the call site. So, in principle we
cannot do the transformation.
To see whether the code in WS_ENTRY_BB blocks the combined
parallel+workshare call, we collect all the variables used in the
GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
statement in WS_ENTRY_BB. If so, then we cannot emit the combined
call.
FIXME. If we had the SSA form built at this point, we could merely
hoist the code in block 3 into block 2 and be done with it. But at
this point we don't have dataflow information and though we could
hack something up here, it is really not worth the aggravation. */
static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
struct omp_for_data fd;
gimple *ws_stmt = last_stmt (ws_entry_bb);
if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
return true;
gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
if (gimple_omp_for_kind (ws_stmt) != GF_OMP_FOR_KIND_FOR)
return false;
omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);
if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
return false;
if (fd.iter_type != long_integer_type_node)
return false;
/* FIXME. We give up too easily here. If any of these arguments
are not constants, they will likely involve variables that have
been mapped into fields of .omp_data_s for sharing with the child
function. With appropriate data flow, it would be possible to
see through this. */
if (!is_gimple_min_invariant (fd.loop.n1)
|| !is_gimple_min_invariant (fd.loop.n2)
|| !is_gimple_min_invariant (fd.loop.step)
|| (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
return false;
return true;
}
/* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
presence (SIMD_SCHEDULE). */
static tree
omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
{
if (!simd_schedule || integer_zerop (chunk_size))
return chunk_size;
poly_uint64 vf = omp_max_vf ();
if (known_eq (vf, 1U))
return chunk_size;
tree type = TREE_TYPE (chunk_size);
chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
build_int_cst (type, vf - 1));
return fold_build2 (BIT_AND_EXPR, type, chunk_size,
build_int_cst (type, -vf));
}
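/* For reference: with a simd modifier the chunk size is rounded up to a
   multiple of the vectorization factor via (chunk + vf-1) & -vf; e.g. a
   requested chunk of 10 with vf == 8 becomes 16. */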
/* Collect additional arguments needed to emit a combined
parallel+workshare call. WS_STMT is the workshare directive being
expanded. */
static vec<tree, va_gc> *
get_ws_args_for (gimple *par_stmt, gimple *ws_stmt)
{
tree t;
location_t loc = gimple_location (ws_stmt);
vec<tree, va_gc> *ws_args;
if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
{
struct omp_for_data fd;
tree n1, n2;
omp_extract_for_data (for_stmt, &fd, NULL);
n1 = fd.loop.n1;
n2 = fd.loop.n2;
if (gimple_omp_for_combined_into_p (for_stmt))
{
tree innerc
= omp_find_clause (gimple_omp_parallel_clauses (par_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n1 = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n2 = OMP_CLAUSE_DECL (innerc);
}
vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
t = fold_convert_loc (loc, long_integer_type_node, n1);
ws_args->quick_push (t);
t = fold_convert_loc (loc, long_integer_type_node, n2);
ws_args->quick_push (t);
t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
ws_args->quick_push (t);
if (fd.chunk_size)
{
t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
t = omp_adjust_chunk_size (t, fd.simd_schedule);
ws_args->quick_push (t);
}
return ws_args;
}
else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
{
/* Number of sections is equal to the number of edges from the
GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
the exit of the sections region. */
basic_block bb = single_succ (gimple_bb (ws_stmt));
t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
vec_alloc (ws_args, 1);
ws_args->quick_push (t);
return ws_args;
}
gcc_unreachable ();
}
/* Discover whether REGION is a combined parallel+workshare region. */
static void
determine_parallel_type (struct omp_region *region)
{
basic_block par_entry_bb, par_exit_bb;
basic_block ws_entry_bb, ws_exit_bb;
if (region == NULL || region->inner == NULL
|| region->exit == NULL || region->inner->exit == NULL
|| region->inner->cont == NULL)
return;
/* We only support parallel+for and parallel+sections. */
if (region->type != GIMPLE_OMP_PARALLEL
|| (region->inner->type != GIMPLE_OMP_FOR
&& region->inner->type != GIMPLE_OMP_SECTIONS))
return;
/* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
WS_EXIT_BB -> PAR_EXIT_BB. */
par_entry_bb = region->entry;
par_exit_bb = region->exit;
ws_entry_bb = region->inner->entry;
ws_exit_bb = region->inner->exit;
/* Give up for task reductions on the parallel, while it is implementable,
adding another big set of APIs or slowing down the normal paths is
not acceptable. */
tree pclauses = gimple_omp_parallel_clauses (last_stmt (par_entry_bb));
if (omp_find_clause (pclauses, OMP_CLAUSE__REDUCTEMP_))
return;
if (single_succ (par_entry_bb) == ws_entry_bb
&& single_succ (ws_exit_bb) == par_exit_bb
&& workshare_safe_to_combine_p (ws_entry_bb)
&& (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
|| (last_and_only_stmt (ws_entry_bb)
&& last_and_only_stmt (par_exit_bb))))
{
gimple *par_stmt = last_stmt (par_entry_bb);
gimple *ws_stmt = last_stmt (ws_entry_bb);
if (region->inner->type == GIMPLE_OMP_FOR)
{
/* If this is a combined parallel loop, we need to determine
whether or not to use the combined library calls. There
are two cases where we do not apply the transformation:
static loops and any kind of ordered loop. In the first
case, we already open code the loop so there is no need
to do anything else. In the latter case, the combined
parallel loop call would still need extra synchronization
to implement ordered semantics, so there would not be any
gain in using the combined call. */
tree clauses = gimple_omp_for_clauses (ws_stmt);
tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE);
if (c == NULL
|| ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
== OMP_CLAUSE_SCHEDULE_STATIC)
|| omp_find_clause (clauses, OMP_CLAUSE_ORDERED)
|| omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_)
|| ((c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_))
&& POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (c)))))
return;
}
else if (region->inner->type == GIMPLE_OMP_SECTIONS
&& (omp_find_clause (gimple_omp_sections_clauses (ws_stmt),
OMP_CLAUSE__REDUCTEMP_)
|| omp_find_clause (gimple_omp_sections_clauses (ws_stmt),
OMP_CLAUSE__CONDTEMP_)))
return;
region->is_combined_parallel = true;
region->inner->is_combined_parallel = true;
region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
}
}
/* Debugging dumps for parallel regions. */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);
/* Dump the parallel region tree rooted at REGION. */
void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
gimple_code_name[region->type]);
if (region->inner)
dump_omp_region (file, region->inner, indent + 4);
if (region->cont)
{
fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
region->cont->index);
}
if (region->exit)
fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
region->exit->index);
else
fprintf (file, "%*s[no exit marker]\n", indent, "");
if (region->next)
dump_omp_region (file, region->next, indent);
}
DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
dump_omp_region (stderr, region, 0);
}
DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT. */
static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
struct omp_region *parent)
{
struct omp_region *region = XCNEW (struct omp_region);
region->outer = parent;
region->entry = bb;
region->type = type;
if (parent)
{
/* This is a nested region. Add it to the list of inner
regions in PARENT. */
region->next = parent->inner;
parent->inner = region;
}
else
{
/* This is a toplevel region. Add it to the list of toplevel
regions in ROOT_OMP_REGION. */
region->next = root_omp_region;
root_omp_region = region;
}
return region;
}
/* Release the memory associated with the region tree rooted at REGION. */
static void
free_omp_region_1 (struct omp_region *region)
{
struct omp_region *i, *n;
for (i = region->inner; i ; i = n)
{
n = i->next;
free_omp_region_1 (i);
}
free (region);
}
/* Release the memory for the entire omp region tree. */
void
omp_free_regions (void)
{
struct omp_region *r, *n;
for (r = root_omp_region; r ; r = n)
{
n = r->next;
free_omp_region_1 (r);
}
root_omp_region = NULL;
}
/* A convenience function to build an empty GIMPLE_COND with just the
condition. */
static gcond *
gimple_build_cond_empty (tree cond)
{
enum tree_code pred_code;
tree lhs, rhs;
gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
/* Change DECL_CONTEXT of CHILD_FNDECL to that of the parent function.
Add CHILD_FNDECL to decl chain of the supercontext of the block
ENTRY_BLOCK - this is the block which originally contained the
code from which CHILD_FNDECL was created.
Together, these actions ensure that the debug info for the outlined
function will be emitted with the correct lexical scope. */
static void
adjust_context_and_scope (struct omp_region *region, tree entry_block,
tree child_fndecl)
{
tree parent_fndecl = NULL_TREE;
gimple *entry_stmt;
/* OMP expansion expands inner regions before outer ones, so if
we e.g. have explicit task region nested in parallel region, when
expanding the task region current_function_decl will be the original
source function, but we actually want to use as context the child
function of the parallel. */
for (region = region->outer;
region && parent_fndecl == NULL_TREE; region = region->outer)
switch (region->type)
{
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_TEAMS:
entry_stmt = last_stmt (region->entry);
parent_fndecl = gimple_omp_taskreg_child_fn (entry_stmt);
break;
case GIMPLE_OMP_TARGET:
entry_stmt = last_stmt (region->entry);
parent_fndecl
= gimple_omp_target_child_fn (as_a <gomp_target *> (entry_stmt));
break;
default:
break;
}
if (parent_fndecl == NULL_TREE)
parent_fndecl = current_function_decl;
DECL_CONTEXT (child_fndecl) = parent_fndecl;
if (entry_block != NULL_TREE && TREE_CODE (entry_block) == BLOCK)
{
tree b = BLOCK_SUPERCONTEXT (entry_block);
if (TREE_CODE (b) == BLOCK)
{
DECL_CHAIN (child_fndecl) = BLOCK_VARS (b);
BLOCK_VARS (b) = child_fndecl;
}
}
}
/* Build the function calls to GOMP_parallel etc to actually
generate the parallel operation. REGION is the parallel region
being expanded. BB is the block where to insert the code. WS_ARGS
will be set if this is a call to a combined parallel+workshare
construct, it contains the list of additional arguments needed by
the workshare construct. */
static void
expand_parallel_call (struct omp_region *region, basic_block bb,
gomp_parallel *entry_stmt,
vec<tree, va_gc> *ws_args)
{
tree t, t1, t2, val, cond, c, clauses, flags;
gimple_stmt_iterator gsi;
gimple *stmt;
enum built_in_function start_ix;
int start_ix2;
location_t clause_loc;
vec<tree, va_gc> *args;
clauses = gimple_omp_parallel_clauses (entry_stmt);
/* Determine what flavor of GOMP_parallel we will be
emitting. */
start_ix = BUILT_IN_GOMP_PARALLEL;
tree rtmp = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
if (rtmp)
start_ix = BUILT_IN_GOMP_PARALLEL_REDUCTIONS;
else if (is_combined_parallel (region))
{
switch (region->inner->type)
{
case GIMPLE_OMP_FOR:
gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
switch (region->inner->sched_kind)
{
case OMP_CLAUSE_SCHEDULE_RUNTIME:
/* For lastprivate(conditional:), our implementation
requires monotonic behavior. */
if (region->inner->has_lastprivate_conditional != 0)
start_ix2 = 3;
else if ((region->inner->sched_modifiers
& OMP_CLAUSE_SCHEDULE_NONMONOTONIC) != 0)
start_ix2 = 6;
else if ((region->inner->sched_modifiers
& OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0)
start_ix2 = 7;
else
start_ix2 = 3;
break;
case OMP_CLAUSE_SCHEDULE_DYNAMIC:
case OMP_CLAUSE_SCHEDULE_GUIDED:
if ((region->inner->sched_modifiers
& OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0
&& !region->inner->has_lastprivate_conditional)
{
start_ix2 = 3 + region->inner->sched_kind;
break;
}
/* FALLTHRU */
default:
start_ix2 = region->inner->sched_kind;
break;
}
start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC;
start_ix = (enum built_in_function) start_ix2;
break;
case GIMPLE_OMP_SECTIONS:
start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
break;
default:
gcc_unreachable ();
}
}
/* By default, the value of NUM_THREADS is zero (selected at run time)
and there is no conditional. */
cond = NULL_TREE;
val = build_int_cst (unsigned_type_node, 0);
flags = build_int_cst (unsigned_type_node, 0);
c = omp_find_clause (clauses, OMP_CLAUSE_IF);
if (c)
cond = OMP_CLAUSE_IF_EXPR (c);
c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS);
if (c)
{
val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
clause_loc = OMP_CLAUSE_LOCATION (c);
}
else
clause_loc = gimple_location (entry_stmt);
c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND);
if (c)
flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
/* Ensure 'val' is of the correct type. */
val = fold_convert_loc (clause_loc, unsigned_type_node, val);
/* If we found the clause 'if (cond)', build either
(cond != 0) or (cond ? val : 1u). */
if (cond)
{
cond = gimple_boolify (cond);
if (integer_zerop (val))
val = fold_build2_loc (clause_loc,
EQ_EXPR, unsigned_type_node, cond,
build_int_cst (TREE_TYPE (cond), 0));
else
{
basic_block cond_bb, then_bb, else_bb;
edge e, e_then, e_else;
tree tmp_then, tmp_else, tmp_join, tmp_var;
tmp_var = create_tmp_var (TREE_TYPE (val));
if (gimple_in_ssa_p (cfun))
{
tmp_then = make_ssa_name (tmp_var);
tmp_else = make_ssa_name (tmp_var);
tmp_join = make_ssa_name (tmp_var);
}
else
{
tmp_then = tmp_var;
tmp_else = tmp_var;
tmp_join = tmp_var;
}
e = split_block_after_labels (bb);
cond_bb = e->src;
bb = e->dest;
remove_edge (e);
then_bb = create_empty_bb (cond_bb);
else_bb = create_empty_bb (then_bb);
set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
stmt = gimple_build_cond_empty (cond);
gsi = gsi_start_bb (cond_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
gsi = gsi_start_bb (then_bb);
expand_omp_build_assign (&gsi, tmp_then, val, true);
gsi = gsi_start_bb (else_bb);
expand_omp_build_assign (&gsi, tmp_else,
build_int_cst (unsigned_type_node, 1),
true);
make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
add_bb_to_loop (then_bb, cond_bb->loop_father);
add_bb_to_loop (else_bb, cond_bb->loop_father);
e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
if (gimple_in_ssa_p (cfun))
{
gphi *phi = create_phi_node (tmp_join, bb);
add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
}
val = tmp_join;
}
gsi = gsi_start_bb (bb);
val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
gsi = gsi_last_nondebug_bb (bb);
t = gimple_omp_parallel_data_arg (entry_stmt);
if (t == NULL)
t1 = null_pointer_node;
else
t1 = build_fold_addr_expr (t);
tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt);
t2 = build_fold_addr_expr (child_fndecl);
vec_alloc (args, 4 + vec_safe_length (ws_args));
args->quick_push (t2);
args->quick_push (t1);
args->quick_push (val);
if (ws_args)
args->splice (*ws_args);
args->quick_push (flags);
t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
builtin_decl_explicit (start_ix), args);
if (rtmp)
{
tree type = TREE_TYPE (OMP_CLAUSE_DECL (rtmp));
t = build2 (MODIFY_EXPR, type, OMP_CLAUSE_DECL (rtmp),
fold_convert (type,
fold_convert (pointer_sized_int_node, t)));
}
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
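/* For reference, the emitted call has the libgomp shape
     GOMP_parallel (child_fn, &.omp_data_o, num_threads, flags);
   e.g. a plain "#pragma omp parallel num_threads (4)" lowers to
     GOMP_parallel (foo._omp_fn.0, &.omp_data_o.1, 4, 0);
   For combined parallel+workshare regions one of the
   GOMP_parallel_loop_* / GOMP_parallel_sections entry points is chosen
   instead, with the WS_ARGS (loop bounds, step and optional chunk size,
   or the section count) spliced in before FLAGS. */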
/* Build the function call to GOMP_task to actually
generate the task operation. BB is the block where to insert the code. */
static void
expand_task_call (struct omp_region *region, basic_block bb,
gomp_task *entry_stmt)
{
tree t1, t2, t3;
gimple_stmt_iterator gsi;
location_t loc = gimple_location (entry_stmt);
tree clauses = gimple_omp_task_clauses (entry_stmt);
tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED);
tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE);
tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL);
tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY);
unsigned int iflags
= (untied ? GOMP_TASK_FLAG_UNTIED : 0)
| (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0)
| (depend ? GOMP_TASK_FLAG_DEPEND : 0);
bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt);
tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE;
tree num_tasks = NULL_TREE;
bool ull = false;
if (taskloop_p)
{
gimple *g = last_stmt (region->outer->entry);
gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP);
struct omp_for_data fd;
omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL);
startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar),
OMP_CLAUSE__LOOPTEMP_);
startvar = OMP_CLAUSE_DECL (startvar);
endvar = OMP_CLAUSE_DECL (endvar);
step = fold_convert_loc (loc, fd.iter_type, fd.loop.step);
if (fd.loop.cond_code == LT_EXPR)
iflags |= GOMP_TASK_FLAG_UP;
tree tclauses = gimple_omp_for_clauses (g);
num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS);
if (num_tasks)
num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks);
else
{
num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE);
if (num_tasks)
{
iflags |= GOMP_TASK_FLAG_GRAINSIZE;
num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks);
}
else
num_tasks = integer_zero_node;
}
num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks);
if (ifc == NULL_TREE)
iflags |= GOMP_TASK_FLAG_IF;
if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP))
iflags |= GOMP_TASK_FLAG_NOGROUP;
ull = fd.iter_type == long_long_unsigned_type_node;
if (omp_find_clause (clauses, OMP_CLAUSE_REDUCTION))
iflags |= GOMP_TASK_FLAG_REDUCTION;
}
else if (priority)
iflags |= GOMP_TASK_FLAG_PRIORITY;
tree flags = build_int_cst (unsigned_type_node, iflags);
tree cond = boolean_true_node;
if (ifc)
{
if (taskloop_p)
{
tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
build_int_cst (unsigned_type_node,
GOMP_TASK_FLAG_IF),
build_int_cst (unsigned_type_node, 0));
flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node,
flags, t);
}
else
cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
}
if (finalc)
{
tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc));
t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
build_int_cst (unsigned_type_node,
GOMP_TASK_FLAG_FINAL),
build_int_cst (unsigned_type_node, 0));
flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t);
}
if (depend)
depend = OMP_CLAUSE_DECL (depend);
else
depend = build_int_cst (ptr_type_node, 0);
if (priority)
priority = fold_convert (integer_type_node,
OMP_CLAUSE_PRIORITY_EXPR (priority));
else
priority = integer_zero_node;
gsi = gsi_last_nondebug_bb (bb);
tree t = gimple_omp_task_data_arg (entry_stmt);
if (t == NULL)
t2 = null_pointer_node;
else
t2 = build_fold_addr_expr_loc (loc, t);
t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
t = gimple_omp_task_copy_fn (entry_stmt);
if (t == NULL)
t3 = null_pointer_node;
else
t3 = build_fold_addr_expr_loc (loc, t);
if (taskloop_p)
t = build_call_expr (ull
? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL)
: builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP),
11, t1, t2, t3,
gimple_omp_task_arg_size (entry_stmt),
gimple_omp_task_arg_align (entry_stmt), flags,
num_tasks, priority, startvar, endvar, step);
else
t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
9, t1, t2, t3,
gimple_omp_task_arg_size (entry_stmt),
gimple_omp_task_arg_align (entry_stmt), cond, flags,
depend, priority);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
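/* For reference, the two emitted call shapes are
     GOMP_task (fn, data, cpyfn, arg_size, arg_align, if_cond, flags,
                depend, priority);
   and, for taskloop,
     GOMP_taskloop[_ull] (fn, data, cpyfn, arg_size, arg_align, flags,
                          num_tasks, priority, start, end, step);
   where the if/final clauses have already been folded into COND and
   FLAGS above. */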
/* Build the function call to GOMP_taskwait_depend to actually
generate the taskwait operation. BB is the block where to insert the
code. */
static void
expand_taskwait_call (basic_block bb, gomp_task *entry_stmt)
{
tree clauses = gimple_omp_task_clauses (entry_stmt);
tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
if (depend == NULL_TREE)
return;
depend = OMP_CLAUSE_DECL (depend);
gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
tree t
= build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT_DEPEND),
1, depend);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
/* Build the function call to GOMP_teams_reg to actually
generate the host teams operation. REGION is the teams region
being expanded. BB is the block where to insert the code. */
static void
expand_teams_call (basic_block bb, gomp_teams *entry_stmt)
{
tree clauses = gimple_omp_teams_clauses (entry_stmt);
tree num_teams = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
if (num_teams == NULL_TREE)
num_teams = build_int_cst (unsigned_type_node, 0);
else
{
num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
num_teams = fold_convert (unsigned_type_node, num_teams);
}
tree thread_limit = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
if (thread_limit == NULL_TREE)
thread_limit = build_int_cst (unsigned_type_node, 0);
else
{
thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
thread_limit = fold_convert (unsigned_type_node, thread_limit);
}
gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
tree t = gimple_omp_teams_data_arg (entry_stmt), t1;
if (t == NULL)
t1 = null_pointer_node;
else
t1 = build_fold_addr_expr (t);
tree child_fndecl = gimple_omp_teams_child_fn (entry_stmt);
tree t2 = build_fold_addr_expr (child_fndecl);
vec<tree, va_gc> *args;
vec_alloc (args, 5);
args->quick_push (t2);
args->quick_push (t1);
args->quick_push (num_teams);
args->quick_push (thread_limit);
/* For future extensibility. */
args->quick_push (build_zero_cst (unsigned_type_node));
t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
builtin_decl_explicit (BUILT_IN_GOMP_TEAMS_REG),
args);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
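/* For reference, the emitted call has the libgomp shape
     GOMP_teams_reg (child_fn, &.omp_data_o, num_teams, thread_limit,
                     flags);
   with FLAGS currently always zero (reserved for future extension). */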
/* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
static tree
vec2chain (vec<tree, va_gc> *v)
{
tree chain = NULL_TREE, t;
unsigned ix;
FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
{
DECL_CHAIN (t) = chain;
chain = t;
}
return chain;
}
/* Remove barriers in REGION->EXIT's block. Note that this is only
valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
removed. */
static void
remove_exit_barrier (struct omp_region *region)
{
gimple_stmt_iterator gsi;
basic_block exit_bb;
edge_iterator ei;
edge e;
gimple *stmt;
int any_addressable_vars = -1;
exit_bb = region->exit;
/* If the parallel region doesn't return, we don't have REGION->EXIT
block at all. */
if (! exit_bb)
return;
/* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
statements that can appear in between are extremely limited -- no
memory operations at all. Here, we allow nothing at all, so the
only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
gsi = gsi_last_nondebug_bb (exit_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
gsi_prev_nondebug (&gsi);
if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
return;
FOR_EACH_EDGE (e, ei, exit_bb->preds)
{
gsi = gsi_last_nondebug_bb (e->src);
if (gsi_end_p (gsi))
continue;
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) == GIMPLE_OMP_RETURN
&& !gimple_omp_return_nowait_p (stmt))
{
/* OpenMP 3.0 tasks unfortunately prevent this optimization
in many cases. If there could be tasks queued, the barrier
might be needed to let the tasks run before some local
variable of the parallel that the task uses as shared
runs out of scope. The task can be spawned either
from within current function (this would be easy to check)
or from some function it calls and gets passed an address
of such a variable. */
if (any_addressable_vars < 0)
{
gomp_parallel *parallel_stmt
= as_a <gomp_parallel *> (last_stmt (region->entry));
tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
tree local_decls, block, decl;
unsigned ix;
any_addressable_vars = 0;
FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
if (TREE_ADDRESSABLE (decl))
{
any_addressable_vars = 1;
break;
}
for (block = gimple_block (stmt);
!any_addressable_vars
&& block
&& TREE_CODE (block) == BLOCK;
block = BLOCK_SUPERCONTEXT (block))
{
for (local_decls = BLOCK_VARS (block);
local_decls;
local_decls = DECL_CHAIN (local_decls))
if (TREE_ADDRESSABLE (local_decls))
{
any_addressable_vars = 1;
break;
}
if (block == gimple_block (parallel_stmt))
break;
}
}
if (!any_addressable_vars)
gimple_omp_return_set_nowait (stmt);
}
}
}
static void
remove_exit_barriers (struct omp_region *region)
{
if (region->type == GIMPLE_OMP_PARALLEL)
remove_exit_barrier (region);
if (region->inner)
{
region = region->inner;
remove_exit_barriers (region);
while (region->next)
{
region = region->next;
remove_exit_barriers (region);
}
}
}
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
calls. These can't be declared as const functions, but
within one parallel body they are constant, so they can be
transformed there into __builtin_omp_get_{thread_num,num_threads} ()
which are declared const. Similarly for task body, except
that in untied task omp_get_thread_num () can change at any task
scheduling point. */
static void
optimize_omp_library_calls (gimple *entry_stmt)
{
basic_block bb;
gimple_stmt_iterator gsi;
tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
&& omp_find_clause (gimple_omp_task_clauses (entry_stmt),
OMP_CLAUSE_UNTIED) != NULL);
FOR_EACH_BB_FN (bb, cfun)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple *call = gsi_stmt (gsi);
tree decl;
if (is_gimple_call (call)
&& (decl = gimple_call_fndecl (call))
&& DECL_EXTERNAL (decl)
&& TREE_PUBLIC (decl)
&& DECL_INITIAL (decl) == NULL)
{
tree built_in;
if (DECL_NAME (decl) == thr_num_id)
{
/* In #pragma omp task untied omp_get_thread_num () can change
during the execution of the task region. */
if (untied_task)
continue;
built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
}
else if (DECL_NAME (decl) == num_thr_id)
built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
else
continue;
if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
|| gimple_call_num_args (call) != 0)
continue;
if (flag_exceptions && !TREE_NOTHROW (decl))
continue;
if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
TREE_TYPE (TREE_TYPE (built_in))))
continue;
gimple_call_set_fndecl (call, built_in);
}
}
}
/* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
regimplified. */
static tree
expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
{
tree t = *tp;
/* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
return t;
if (TREE_CODE (t) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (t);
*walk_subtrees = !TYPE_P (t) && !DECL_P (t);
return NULL_TREE;
}
/* Prepend or append TO = FROM assignment before or after *GSI_P. */
static void
expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
bool after)
{
bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
!after, after ? GSI_CONTINUE_LINKING
: GSI_SAME_STMT);
gimple *stmt = gimple_build_assign (to, from);
if (after)
gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
else
gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
|| walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
{
gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
gimple_regimplify_operands (stmt, &gsi);
}
}
/* Expand the OpenMP parallel or task directive starting at REGION. */
static void
expand_omp_taskreg (struct omp_region *region)
{
basic_block entry_bb, exit_bb, new_bb;
struct function *child_cfun;
tree child_fn, block, t;
gimple_stmt_iterator gsi;
gimple *entry_stmt, *stmt;
edge e;
vec<tree, va_gc> *ws_args;
entry_stmt = last_stmt (region->entry);
if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
&& gimple_omp_task_taskwait_p (entry_stmt))
{
new_bb = region->entry;
gsi = gsi_last_nondebug_bb (region->entry);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
gsi_remove (&gsi, true);
expand_taskwait_call (new_bb, as_a <gomp_task *> (entry_stmt));
return;
}
child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
entry_bb = region->entry;
if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
exit_bb = region->cont;
else
exit_bb = region->exit;
if (is_combined_parallel (region))
ws_args = region->ws_args;
else
ws_args = NULL;
if (child_cfun->cfg)
{
/* Due to inlining, it may happen that we have already outlined
the region, in which case all we need to do is make the
sub-graph unreachable and emit the parallel call. */
edge entry_succ_e, exit_succ_e;
entry_succ_e = single_succ_edge (entry_bb);
gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
|| gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK
|| gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TEAMS);
gsi_remove (&gsi, true);
new_bb = entry_bb;
if (exit_bb)
{
exit_succ_e = single_succ_edge (exit_bb);
make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
}
remove_edge_and_dominated_blocks (entry_succ_e);
}
else
{
unsigned srcidx, dstidx, num;
/* If the parallel region needs data sent from the parent
function, then the very first statement (except possible
tree profile counter updates) of the parallel body
is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
&.OMP_DATA_O is passed as an argument to the child function,
we need to replace it with the argument as seen by the child
function.
In most cases, this will end up being the identity assignment
.OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
a function call that has been inlined, the original PARM_DECL
.OMP_DATA_I may have been converted into a different local
variable, in which case we need to keep the assignment. */
if (gimple_omp_taskreg_data_arg (entry_stmt))
{
basic_block entry_succ_bb
= single_succ_p (entry_bb) ? single_succ (entry_bb)
: FALLTHRU_EDGE (entry_bb)->dest;
tree arg;
gimple *parcopy_stmt = NULL;
for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
{
gimple *stmt;
gcc_assert (!gsi_end_p (gsi));
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) != GIMPLE_ASSIGN)
continue;
if (gimple_num_ops (stmt) == 2)
{
tree arg = gimple_assign_rhs1 (stmt);
/* We ignore the subcode because we're
effectively doing a STRIP_NOPS. */
if (TREE_CODE (arg) == ADDR_EXPR
&& (TREE_OPERAND (arg, 0)
== gimple_omp_taskreg_data_arg (entry_stmt)))
{
parcopy_stmt = stmt;
break;
}
}
}
gcc_assert (parcopy_stmt != NULL);
arg = DECL_ARGUMENTS (child_fn);
if (!gimple_in_ssa_p (cfun))
{
if (gimple_assign_lhs (parcopy_stmt) == arg)
gsi_remove (&gsi, true);
else
{
/* ?? Is setting the subcode really necessary ?? */
gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
gimple_assign_set_rhs1 (parcopy_stmt, arg);
}
}
else
{
tree lhs = gimple_assign_lhs (parcopy_stmt);
gcc_assert (SSA_NAME_VAR (lhs) == arg);
/* We'd like to set the rhs to the default def in the child_fn,
but it's too early to create ssa names in the child_fn.
Instead, we set the rhs to the parm. In
move_sese_region_to_fn, we introduce a default def for the
parm, map the parm to its default def, and once we encounter
this stmt, replace the parm with the default def. */
gimple_assign_set_rhs1 (parcopy_stmt, arg);
update_stmt (parcopy_stmt);
}
}
/* Declare local variables needed in CHILD_CFUN. */
block = DECL_INITIAL (child_fn);
BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
/* The gimplifier could record temporaries in parallel/task block
rather than in containing function's local_decls chain,
which would mean cgraph missed finalizing them. Do it now. */
for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
varpool_node::finalize_decl (t);
DECL_SAVED_TREE (child_fn) = NULL;
/* We'll create a CFG for child_fn, so no gimple body is needed. */
gimple_set_body (child_fn, NULL);
TREE_USED (block) = 1;
/* Reset DECL_CONTEXT on function arguments. */
for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
/* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
so that it can be moved to the child function. */
gsi = gsi_last_nondebug_bb (entry_bb);
stmt = gsi_stmt (gsi);
gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
|| gimple_code (stmt) == GIMPLE_OMP_TASK
|| gimple_code (stmt) == GIMPLE_OMP_TEAMS));
e = split_block (entry_bb, stmt);
gsi_remove (&gsi, true);
entry_bb = e->dest;
edge e2 = NULL;
if (gimple_code (entry_stmt) != GIMPLE_OMP_TASK)
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
else
{
e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
gcc_assert (e2->dest == region->exit);
remove_edge (BRANCH_EDGE (entry_bb));
set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
gsi = gsi_last_nondebug_bb (region->exit);
gcc_assert (!gsi_end_p (gsi)
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
gsi_remove (&gsi, true);
}
/* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */
if (exit_bb)
{
gsi = gsi_last_nondebug_bb (exit_bb);
gcc_assert (!gsi_end_p (gsi)
&& (gimple_code (gsi_stmt (gsi))
== (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
stmt = gimple_build_return (NULL);
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
gsi_remove (&gsi, true);
}
/* Move the parallel region into CHILD_CFUN. */
if (gimple_in_ssa_p (cfun))
{
init_tree_ssa (child_cfun);
init_ssa_operands (child_cfun);
child_cfun->gimple_df->in_ssa_p = true;
block = NULL_TREE;
}
else
block = gimple_block (entry_stmt);
new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
if (exit_bb)
single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
if (e2)
{
basic_block dest_bb = e2->dest;
if (!exit_bb)
make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
remove_edge (e2);
set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
}
/* When the OMP expansion process cannot guarantee an up-to-date
loop tree arrange for the child function to fixup loops. */
if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
/* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
num = vec_safe_length (child_cfun->local_decls);
for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
{
t = (*child_cfun->local_decls)[srcidx];
if (DECL_CONTEXT (t) == cfun->decl)
continue;
if (srcidx != dstidx)
(*child_cfun->local_decls)[dstidx] = t;
dstidx++;
}
if (dstidx != num)
vec_safe_truncate (child_cfun->local_decls, dstidx);
/* Inform the callgraph about the new function. */
child_cfun->curr_properties = cfun->curr_properties;
child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
cgraph_node *node = cgraph_node::get_create (child_fn);
node->parallelized_function = 1;
cgraph_node::add_new_function (child_fn, true);
bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
&& !DECL_ASSEMBLER_NAME_SET_P (child_fn);
/* Fix the callgraph edges for child_cfun. Those for cfun will be
fixed in a following pass. */
push_cfun (child_cfun);
if (need_asm)
assign_assembler_name_if_needed (child_fn);
if (optimize)
optimize_omp_library_calls (entry_stmt);
update_max_bb_count ();
cgraph_edge::rebuild_edges ();
/* Some EH regions might become dead, see PR34608. If
pass_cleanup_cfg isn't the first pass to happen with the
new child, these dead EH edges might cause problems.
Clean them up now. */
if (flag_exceptions)
{
basic_block bb;
bool changed = false;
FOR_EACH_BB_FN (bb, cfun)
changed |= gimple_purge_dead_eh_edges (bb);
if (changed)
cleanup_tree_cfg ();
}
if (gimple_in_ssa_p (cfun))
update_ssa (TODO_update_ssa);
if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
verify_loop_structure ();
pop_cfun ();
if (dump_file && !gimple_in_ssa_p (cfun))
{
omp_any_child_fn_dumped = true;
dump_function_header (dump_file, child_fn, dump_flags);
dump_function_to_file (child_fn, dump_file, dump_flags);
}
}
adjust_context_and_scope (region, gimple_block (entry_stmt), child_fn);
if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
expand_parallel_call (region, new_bb,
as_a <gomp_parallel *> (entry_stmt), ws_args);
else if (gimple_code (entry_stmt) == GIMPLE_OMP_TEAMS)
expand_teams_call (new_bb, as_a <gomp_teams *> (entry_stmt));
else
expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt));
if (gimple_in_ssa_p (cfun))
update_ssa (TODO_update_ssa_only_virtuals);
}
/* Information about members of an OpenACC collapsed loop nest. */
struct oacc_collapse
{
tree base; /* Base value. */
tree iters; /* Number of steps. */
tree step; /* Step size. */
tree tile; /* Tile increment (if tiled). */
tree outer; /* Tile iterator var. */
};
/* Helper for expand_oacc_for. Determine collapsed loop information.
Fill in COUNTS array. Emit any initialization code before GSI.
Return the calculated outer loop bound of BOUND_TYPE. */
static tree
expand_oacc_collapse_init (const struct omp_for_data *fd,
gimple_stmt_iterator *gsi,
oacc_collapse *counts, tree bound_type,
location_t loc)
{
tree tiling = fd->tiling;
tree total = build_int_cst (bound_type, 1);
int ix;
gcc_assert (integer_onep (fd->loop.step));
gcc_assert (integer_zerop (fd->loop.n1));
/* When tiling, the first operand of the tile clause applies to the
innermost loop, and we work outwards from there. Seems
backwards, but whatever. */
for (ix = fd->collapse; ix--;)
{
const omp_for_data_loop *loop = &fd->loops[ix];
tree iter_type = TREE_TYPE (loop->v);
tree diff_type = iter_type;
tree plus_type = iter_type;
gcc_assert (loop->cond_code == fd->loop.cond_code);
if (POINTER_TYPE_P (iter_type))
plus_type = sizetype;
if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
diff_type = signed_type_for (diff_type);
if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node))
diff_type = integer_type_node;
if (tiling)
{
tree num = build_int_cst (integer_type_node, fd->collapse);
tree loop_no = build_int_cst (integer_type_node, ix);
tree tile = TREE_VALUE (tiling);
gcall *call
= gimple_build_call_internal (IFN_GOACC_TILE, 5, num, loop_no, tile,
/* gwv-outer=*/integer_zero_node,
/* gwv-inner=*/integer_zero_node);
counts[ix].outer = create_tmp_var (iter_type, ".outer");
counts[ix].tile = create_tmp_var (diff_type, ".tile");
gimple_call_set_lhs (call, counts[ix].tile);
gimple_set_location (call, loc);
gsi_insert_before (gsi, call, GSI_SAME_STMT);
tiling = TREE_CHAIN (tiling);
}
else
{
counts[ix].tile = NULL;
counts[ix].outer = loop->v;
}
tree b = loop->n1;
tree e = loop->n2;
tree s = loop->step;
bool up = loop->cond_code == LT_EXPR;
tree dir = build_int_cst (diff_type, up ? +1 : -1);
bool negating;
tree expr;
b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE,
true, GSI_SAME_STMT);
e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE,
true, GSI_SAME_STMT);
/* Convert the step, avoiding possible unsigned->signed overflow. */
negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
if (negating)
s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
s = fold_convert (diff_type, s);
if (negating)
s = fold_build1 (NEGATE_EXPR, diff_type, s);
s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE,
true, GSI_SAME_STMT);
/* Determine the range, avoiding possible unsigned->signed overflow. */
negating = !up && TYPE_UNSIGNED (iter_type);
expr = fold_build2 (MINUS_EXPR, plus_type,
fold_convert (plus_type, negating ? b : e),
fold_convert (plus_type, negating ? e : b));
expr = fold_convert (diff_type, expr);
if (negating)
expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
tree range = force_gimple_operand_gsi
(gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT);
/* Determine number of iterations. */
expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
true, GSI_SAME_STMT);
counts[ix].base = b;
counts[ix].iters = iters;
counts[ix].step = s;
total = fold_build2 (MULT_EXPR, bound_type, total,
fold_convert (bound_type, iters));
}
return total;
}
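/* For reference: with DIR = +/-1 the per-loop trip count above is
     iters = (range - dir + step) / step,
   i.e. ceil (range / step) for a forward loop, and TOTAL is the product
   of the per-loop counts -- the bound of the single collapsed loop. */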
/* Emit initializers for collapsed loop members. INNER is true if
this is for the element loop of a TILE. IVAR is the outer
loop iteration variable, from which collapsed loop iteration values
are calculated. COUNTS array has been initialized by
expand_oacc_collapse_init.  */
static void
expand_oacc_collapse_vars (const struct omp_for_data *fd, bool inner,
gimple_stmt_iterator *gsi,
const oacc_collapse *counts, tree ivar)
{
tree ivar_type = TREE_TYPE (ivar);
/* The most rapidly changing iteration variable is the innermost
one. */
for (int ix = fd->collapse; ix--;)
{
const omp_for_data_loop *loop = &fd->loops[ix];
const oacc_collapse *collapse = &counts[ix];
tree v = inner ? loop->v : collapse->outer;
tree iter_type = TREE_TYPE (v);
tree diff_type = TREE_TYPE (collapse->step);
tree plus_type = iter_type;
enum tree_code plus_code = PLUS_EXPR;
tree expr;
if (POINTER_TYPE_P (iter_type))
{
plus_code = POINTER_PLUS_EXPR;
plus_type = sizetype;
}
expr = ivar;
if (ix)
{
tree mod = fold_convert (ivar_type, collapse->iters);
ivar = fold_build2 (TRUNC_DIV_EXPR, ivar_type, expr, mod);
expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, expr, mod);
ivar = force_gimple_operand_gsi (gsi, ivar, true, NULL_TREE,
true, GSI_SAME_STMT);
}
expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr),
collapse->step);
expr = fold_build2 (plus_code, iter_type,
inner ? collapse->outer : collapse->base,
fold_convert (plus_type, expr));
expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE,
true, GSI_SAME_STMT);
gassign *ass = gimple_build_assign (v, expr);
gsi_insert_before (gsi, ass, GSI_SAME_STMT);
}
}
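/* For illustration only, a hypothetical example (not emitted code):
   for a collapse(2) nest whose counts are iters0 = 4 and iters1 = 5,
   a combined index IVAR in [0, 20) is decomposed above, innermost
   loop first, as
     v1 = base1 + (IVAR % 5) * step1;  IVAR /= 5;
     v0 = base0 + IVAR * step0;
   so the innermost iterator is indeed the most rapidly changing
   one.  */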
/* Helper function for expand_omp_{for_*,simd}. If this is the outermost
of the combined collapse > 1 loop constructs, generate code like:
if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
if (cond3 is <)
adj = STEP3 - 1;
else
adj = STEP3 + 1;
count3 = (adj + N32 - N31) / STEP3;
if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
if (cond2 is <)
adj = STEP2 - 1;
else
adj = STEP2 + 1;
count2 = (adj + N22 - N21) / STEP2;
if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
if (cond1 is <)
adj = STEP1 - 1;
else
adj = STEP1 + 1;
count1 = (adj + N12 - N11) / STEP1;
count = count1 * count2 * count3;
Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
count = 0;
and set ZERO_ITER_BB to that bb. If this isn't the outermost
of the combined loop constructs, just initialize COUNTS array
from the _looptemp_ clauses. For loop nests with non-rectangular
loops, do this only for the rectangular loops. Then pick
the loops which reference outer vars in their bound expressions
and the loops which they refer to and for this sub-nest compute
number of iterations. For triangular loops use Faulhaber's formula,
otherwise as a fallback, compute by iterating the loops.
If e.g. the sub-nest is
for (I = N11; I COND1 N12; I += STEP1)
for (J = M21 * I + N21; J COND2 M22 * I + N22; J += STEP2)
for (K = M31 * J + N31; K COND3 M32 * J + N32; K += STEP3)
do:
COUNT = 0;
for (tmpi = N11; tmpi COND1 N12; tmpi += STEP1)
for (tmpj = M21 * tmpi + N21;
tmpj COND2 M22 * tmpi + N22; tmpj += STEP2)
{
int tmpk1 = M31 * tmpj + N31;
int tmpk2 = M32 * tmpj + N32;
if (tmpk1 COND3 tmpk2)
{
if (COND3 is <)
adj = STEP3 - 1;
else
adj = STEP3 + 1;
COUNT += (adj + tmpk2 - tmpk1) / STEP3;
}
}
and finally multiply the counts of the rectangular loops not
in the sub-nest with COUNT. Also, as counts[fd->last_nonrect]
store number of iterations of the loops from fd->first_nonrect
to fd->last_nonrect inclusive, i.e. the above COUNT multiplied
by the counts of rectangular loops not referenced in any non-rectangular
loops sandwiched in between those.  */
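/* For illustration only, a hypothetical example: for the triangular
   sub-nest
     for (I = 0; I < 4; I += 1)
       for (J = 0; J < 1 * I + 1; J += 1)
   we have outer_niters = 4, first_inner_iterations = 1 (the J count
   for the first I) and factor = 1, so Faulhaber's formula in the
   code below computes
     COUNT = outer_niters * first_inner_iterations
	     + factor * (outer_niters - 1) * outer_niters / 2
	   = 4 * 1 + 1 * 3 * 4 / 2 = 10,
   matching the 1 + 2 + 3 + 4 = 10 iterations of the nest.  */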
/* NOTE: It *could* be better to moosh all of the BBs together,
creating one larger BB with all the computation and the unexpected
jump at the end. I.e.
bool zero3, zero2, zero1, zero;
zero3 = N32 c3 N31;
count3 = (N32 - N31) /[cl] STEP3;
zero2 = N22 c2 N21;
count2 = (N22 - N21) /[cl] STEP2;
zero1 = N12 c1 N11;
count1 = (N12 - N11) /[cl] STEP1;
zero = zero3 || zero2 || zero1;
count = count1 * count2 * count3;
if (__builtin_expect(zero, false)) goto zero_iter_bb;
After all, we expect the zero=false, and thus we expect to have to
evaluate all of the comparison expressions, so short-circuiting
oughtn't be a win. Since the condition isn't protecting a
denominator, we're not concerned about divide-by-zero, so we can
fully evaluate count even if a numerator turned out to be wrong.
It seems like putting this all together would create much better
scheduling opportunities, and less pressure on the chip's branch
predictor. */
static void
expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
basic_block &entry_bb, tree *counts,
basic_block &zero_iter1_bb, int &first_zero_iter1,
basic_block &zero_iter2_bb, int &first_zero_iter2,
basic_block &l2_dom_bb)
{
tree t, type = TREE_TYPE (fd->loop.v);
edge e, ne;
int i;
/* Collapsed loops need work for expansion into SSA form. */
gcc_assert (!gimple_in_ssa_p (cfun));
if (gimple_omp_for_combined_into_p (fd->for_stmt)
&& TREE_CODE (fd->loop.n2) != INTEGER_CST)
{
gcc_assert (fd->ordered == 0);
/* First two _looptemp_ clauses are for istart/iend, counts[0]
isn't supposed to be handled, as the inner loop doesn't
use it. */
tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
for (i = 0; i < fd->collapse; i++)
{
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
if (i)
counts[i] = OMP_CLAUSE_DECL (innerc);
else
counts[0] = NULL_TREE;
}
if (fd->non_rect
&& fd->last_nonrect == fd->first_nonrect + 1
&& !TYPE_UNSIGNED (TREE_TYPE (fd->loops[fd->last_nonrect].v)))
{
tree c[4];
for (i = 0; i < 4; i++)
{
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
c[i] = OMP_CLAUSE_DECL (innerc);
}
counts[0] = c[0];
fd->first_inner_iterations = c[1];
fd->factor = c[2];
fd->adjn1 = c[3];
}
return;
}
for (i = fd->collapse; i < fd->ordered; i++)
{
tree itype = TREE_TYPE (fd->loops[i].v);
counts[i] = NULL_TREE;
t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
fold_convert (itype, fd->loops[i].n1),
fold_convert (itype, fd->loops[i].n2));
if (t && integer_zerop (t))
{
for (i = fd->collapse; i < fd->ordered; i++)
counts[i] = build_int_cst (type, 0);
break;
}
}
bool rect_count_seen = false;
for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++)
{
tree itype = TREE_TYPE (fd->loops[i].v);
if (i >= fd->collapse && counts[i])
continue;
if (fd->non_rect)
{
/* Skip loops that use outer iterators in their expressions
during this phase. */
if (fd->loops[i].m1 || fd->loops[i].m2)
{
counts[i] = build_zero_cst (type);
continue;
}
}
if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse)
&& ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
fold_convert (itype, fd->loops[i].n1),
fold_convert (itype, fd->loops[i].n2)))
== NULL_TREE || !integer_onep (t)))
{
gcond *cond_stmt;
tree n1, n2;
n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
true, GSI_SAME_STMT);
cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
NULL_TREE, NULL_TREE);
gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
expand_omp_regimplify_p, NULL, NULL)
|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
expand_omp_regimplify_p, NULL, NULL))
{
*gsi = gsi_for_stmt (cond_stmt);
gimple_regimplify_operands (cond_stmt, gsi);
}
e = split_block (entry_bb, cond_stmt);
basic_block &zero_iter_bb
= i < fd->collapse ? zero_iter1_bb : zero_iter2_bb;
int &first_zero_iter
= i < fd->collapse ? first_zero_iter1 : first_zero_iter2;
if (zero_iter_bb == NULL)
{
gassign *assign_stmt;
first_zero_iter = i;
zero_iter_bb = create_empty_bb (entry_bb);
add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
*gsi = gsi_after_labels (zero_iter_bb);
if (i < fd->collapse)
assign_stmt = gimple_build_assign (fd->loop.n2,
build_zero_cst (type));
else
{
counts[i] = create_tmp_reg (type, ".count");
assign_stmt
= gimple_build_assign (counts[i], build_zero_cst (type));
}
gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
entry_bb);
}
ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
ne->probability = profile_probability::very_unlikely ();
e->flags = EDGE_TRUE_VALUE;
e->probability = ne->probability.invert ();
if (l2_dom_bb == NULL)
l2_dom_bb = entry_bb;
entry_bb = e->dest;
*gsi = gsi_last_nondebug_bb (entry_bb);
}
if (POINTER_TYPE_P (itype))
itype = signed_type_for (itype);
t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype,
fold_convert (itype, fd->loops[i].step), t);
t = fold_build2 (PLUS_EXPR, itype, t,
fold_convert (itype, fd->loops[i].n2));
t = fold_build2 (MINUS_EXPR, itype, t,
fold_convert (itype, fd->loops[i].n1));
/* ?? We could probably use CEIL_DIV_EXPR instead of
TRUNC_DIV_EXPR and adjust by hand, unless we can't
generate the same code in the end because generically we
don't know that the values involved must be negative for
GT. ?? */
if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype,
fold_convert (itype,
fd->loops[i].step)));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
fold_convert (itype, fd->loops[i].step));
t = fold_convert (type, t);
if (TREE_CODE (t) == INTEGER_CST)
counts[i] = t;
else
{
if (i < fd->collapse || i != first_zero_iter2)
counts[i] = create_tmp_reg (type, ".count");
expand_omp_build_assign (gsi, counts[i], t);
}
if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse)
{
if (fd->non_rect && i >= fd->first_nonrect && i <= fd->last_nonrect)
continue;
if (!rect_count_seen)
{
t = counts[i];
rect_count_seen = true;
}
else
t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
expand_omp_build_assign (gsi, fd->loop.n2, t);
}
}
if (fd->non_rect && SSA_VAR_P (fd->loop.n2))
{
gcc_assert (fd->last_nonrect != -1);
counts[fd->last_nonrect] = create_tmp_reg (type, ".count");
expand_omp_build_assign (gsi, counts[fd->last_nonrect],
build_zero_cst (type));
for (i = fd->first_nonrect + 1; i < fd->last_nonrect; i++)
if (fd->loops[i].m1
|| fd->loops[i].m2
|| fd->loops[i].non_rect_referenced)
break;
if (i == fd->last_nonrect
&& fd->loops[i].outer == fd->last_nonrect - fd->first_nonrect
&& !TYPE_UNSIGNED (TREE_TYPE (fd->loops[i].v)))
{
int o = fd->first_nonrect;
tree itype = TREE_TYPE (fd->loops[o].v);
tree n1o = create_tmp_reg (itype, ".n1o");
t = fold_convert (itype, unshare_expr (fd->loops[o].n1));
expand_omp_build_assign (gsi, n1o, t);
tree n2o = create_tmp_reg (itype, ".n2o");
t = fold_convert (itype, unshare_expr (fd->loops[o].n2));
expand_omp_build_assign (gsi, n2o, t);
if (fd->loops[i].m1 && fd->loops[i].m2)
t = fold_build2 (MINUS_EXPR, itype, unshare_expr (fd->loops[i].m2),
unshare_expr (fd->loops[i].m1));
else if (fd->loops[i].m1)
t = fold_unary (NEGATE_EXPR, itype,
unshare_expr (fd->loops[i].m1));
else
t = unshare_expr (fd->loops[i].m2);
tree m2minusm1
= force_gimple_operand_gsi (gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
gimple_stmt_iterator gsi2 = *gsi;
gsi_prev (&gsi2);
e = split_block (entry_bb, gsi_stmt (gsi2));
e = split_block (e->dest, (gimple *) NULL);
basic_block bb1 = e->src;
entry_bb = e->dest;
*gsi = gsi_after_labels (entry_bb);
gsi2 = gsi_after_labels (bb1);
tree ostep = fold_convert (itype, fd->loops[o].step);
t = build_int_cst (itype, (fd->loops[o].cond_code
== LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype, ostep, t);
t = fold_build2 (PLUS_EXPR, itype, t, n2o);
t = fold_build2 (MINUS_EXPR, itype, t, n1o);
if (TYPE_UNSIGNED (itype)
&& fd->loops[o].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, ostep));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, ostep);
tree outer_niters
= force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_build2 (MINUS_EXPR, itype, outer_niters,
build_one_cst (itype));
t = fold_build2 (MULT_EXPR, itype, t, ostep);
t = fold_build2 (PLUS_EXPR, itype, n1o, t);
tree last = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
tree n1, n2, n1e, n2e;
t = fold_convert (itype, unshare_expr (fd->loops[i].n1));
if (fd->loops[i].m1)
{
n1 = fold_convert (itype, unshare_expr (fd->loops[i].m1));
n1 = fold_build2 (MULT_EXPR, itype, n1o, n1);
n1 = fold_build2 (PLUS_EXPR, itype, n1, t);
}
else
n1 = t;
n1 = force_gimple_operand_gsi (&gsi2, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_convert (itype, unshare_expr (fd->loops[i].n2));
if (fd->loops[i].m2)
{
n2 = fold_convert (itype, unshare_expr (fd->loops[i].m2));
n2 = fold_build2 (MULT_EXPR, itype, n1o, n2);
n2 = fold_build2 (PLUS_EXPR, itype, n2, t);
}
else
n2 = t;
n2 = force_gimple_operand_gsi (&gsi2, n2, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_convert (itype, unshare_expr (fd->loops[i].n1));
if (fd->loops[i].m1)
{
n1e = fold_convert (itype, unshare_expr (fd->loops[i].m1));
n1e = fold_build2 (MULT_EXPR, itype, last, n1e);
n1e = fold_build2 (PLUS_EXPR, itype, n1e, t);
}
else
n1e = t;
n1e = force_gimple_operand_gsi (&gsi2, n1e, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_convert (itype, unshare_expr (fd->loops[i].n2));
if (fd->loops[i].m2)
{
n2e = fold_convert (itype, unshare_expr (fd->loops[i].m2));
n2e = fold_build2 (MULT_EXPR, itype, last, n2e);
n2e = fold_build2 (PLUS_EXPR, itype, n2e, t);
}
else
n2e = t;
n2e = force_gimple_operand_gsi (&gsi2, n2e, true, NULL_TREE,
true, GSI_SAME_STMT);
gcond *cond_stmt
= gimple_build_cond (fd->loops[i].cond_code, n1, n2,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
e = split_block (bb1, cond_stmt);
e->flags = EDGE_TRUE_VALUE;
e->probability = profile_probability::likely ().guessed ();
basic_block bb2 = e->dest;
gsi2 = gsi_after_labels (bb2);
cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1e, n2e,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
e = split_block (bb2, cond_stmt);
e->flags = EDGE_TRUE_VALUE;
e->probability = profile_probability::likely ().guessed ();
gsi2 = gsi_after_labels (e->dest);
tree step = fold_convert (itype, fd->loops[i].step);
t = build_int_cst (itype, (fd->loops[i].cond_code
== LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype, step, t);
t = fold_build2 (PLUS_EXPR, itype, t, n2);
t = fold_build2 (MINUS_EXPR, itype, t, n1);
if (TYPE_UNSIGNED (itype)
&& fd->loops[i].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, step));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
tree first_inner_iterations
= force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_build2 (MULT_EXPR, itype, m2minusm1, ostep);
if (TYPE_UNSIGNED (itype)
&& fd->loops[i].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, step));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
tree factor
= force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_build2 (MINUS_EXPR, itype, outer_niters,
build_one_cst (itype));
t = fold_build2 (MULT_EXPR, itype, t, outer_niters);
t = fold_build2 (RSHIFT_EXPR, itype, t, integer_one_node);
t = fold_build2 (MULT_EXPR, itype, factor, t);
t = fold_build2 (PLUS_EXPR, itype,
fold_build2 (MULT_EXPR, itype, outer_niters,
first_inner_iterations), t);
expand_omp_build_assign (&gsi2, counts[fd->last_nonrect],
fold_convert (type, t));
basic_block bb3 = create_empty_bb (bb1);
add_bb_to_loop (bb3, bb1->loop_father);
e = make_edge (bb1, bb3, EDGE_FALSE_VALUE);
e->probability = profile_probability::unlikely ().guessed ();
gsi2 = gsi_after_labels (bb3);
cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1e, n2e,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
e = split_block (bb3, cond_stmt);
e->flags = EDGE_TRUE_VALUE;
e->probability = profile_probability::likely ().guessed ();
basic_block bb4 = e->dest;
ne = make_edge (bb3, entry_bb, EDGE_FALSE_VALUE);
ne->probability = e->probability.invert ();
basic_block bb5 = create_empty_bb (bb2);
add_bb_to_loop (bb5, bb2->loop_father);
ne = make_edge (bb2, bb5, EDGE_FALSE_VALUE);
ne->probability = profile_probability::unlikely ().guessed ();
for (int j = 0; j < 2; j++)
{
gsi2 = gsi_after_labels (j ? bb5 : bb4);
t = fold_build2 (MINUS_EXPR, itype,
unshare_expr (fd->loops[i].n1),
unshare_expr (fd->loops[i].n2));
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, m2minusm1);
tree tem
= force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_build2 (MINUS_EXPR, itype, tem, n1o);
t = fold_build2 (TRUNC_MOD_EXPR, itype, t, ostep);
t = fold_build2 (MINUS_EXPR, itype, tem, t);
tem = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_convert (itype, unshare_expr (fd->loops[i].n1));
if (fd->loops[i].m1)
{
n1 = fold_convert (itype, unshare_expr (fd->loops[i].m1));
n1 = fold_build2 (MULT_EXPR, itype, tem, n1);
n1 = fold_build2 (PLUS_EXPR, itype, n1, t);
}
else
n1 = t;
n1 = force_gimple_operand_gsi (&gsi2, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
t = fold_convert (itype, unshare_expr (fd->loops[i].n2));
if (fd->loops[i].m2)
{
n2 = fold_convert (itype, unshare_expr (fd->loops[i].m2));
n2 = fold_build2 (MULT_EXPR, itype, tem, n2);
n2 = fold_build2 (PLUS_EXPR, itype, n2, t);
}
else
n2 = t;
n2 = force_gimple_operand_gsi (&gsi2, n2, true, NULL_TREE,
true, GSI_SAME_STMT);
expand_omp_build_assign (&gsi2, j ? n2o : n1o, tem);
cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
e = split_block (gsi_bb (gsi2), cond_stmt);
e->flags = j ? EDGE_TRUE_VALUE : EDGE_FALSE_VALUE;
e->probability = profile_probability::unlikely ().guessed ();
ne = make_edge (e->src, bb1,
j ? EDGE_FALSE_VALUE : EDGE_TRUE_VALUE);
ne->probability = e->probability.invert ();
gsi2 = gsi_after_labels (e->dest);
t = fold_build2 (PLUS_EXPR, itype, tem, ostep);
expand_omp_build_assign (&gsi2, j ? n2o : n1o, t);
make_edge (e->dest, bb1, EDGE_FALLTHRU);
}
set_immediate_dominator (CDI_DOMINATORS, bb3, bb1);
set_immediate_dominator (CDI_DOMINATORS, bb5, bb2);
set_immediate_dominator (CDI_DOMINATORS, entry_bb, bb1);
if (fd->first_nonrect + 1 == fd->last_nonrect)
{
fd->first_inner_iterations = first_inner_iterations;
fd->factor = factor;
fd->adjn1 = n1o;
}
}
else
{
/* Fallback implementation. Evaluate the loops with m1/m2
non-NULL as well as their outer loops at runtime using temporaries
instead of the original iteration variables, and in the
body just bump the counter. */
gimple_stmt_iterator gsi2 = *gsi;
gsi_prev (&gsi2);
e = split_block (entry_bb, gsi_stmt (gsi2));
e = split_block (e->dest, (gimple *) NULL);
basic_block cur_bb = e->src;
basic_block next_bb = e->dest;
entry_bb = e->dest;
*gsi = gsi_after_labels (entry_bb);
tree *vs = XALLOCAVEC (tree, fd->last_nonrect);
memset (vs, 0, fd->last_nonrect * sizeof (tree));
for (i = 0; i <= fd->last_nonrect; i++)
{
if (fd->loops[i].m1 == NULL_TREE
&& fd->loops[i].m2 == NULL_TREE
&& !fd->loops[i].non_rect_referenced)
continue;
tree itype = TREE_TYPE (fd->loops[i].v);
gsi2 = gsi_after_labels (cur_bb);
tree n1, n2;
t = fold_convert (itype, unshare_expr (fd->loops[i].n1));
if (fd->loops[i].m1)
{
n1 = fold_convert (itype, unshare_expr (fd->loops[i].m1));
n1 = fold_build2 (MULT_EXPR, itype,
vs[i - fd->loops[i].outer], n1);
n1 = fold_build2 (PLUS_EXPR, itype, n1, t);
}
else
n1 = t;
n1 = force_gimple_operand_gsi (&gsi2, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
if (i < fd->last_nonrect)
{
vs[i] = create_tmp_reg (itype, ".it");
expand_omp_build_assign (&gsi2, vs[i], n1);
}
t = fold_convert (itype, unshare_expr (fd->loops[i].n2));
if (fd->loops[i].m2)
{
n2 = fold_convert (itype, unshare_expr (fd->loops[i].m2));
n2 = fold_build2 (MULT_EXPR, itype,
vs[i - fd->loops[i].outer], n2);
n2 = fold_build2 (PLUS_EXPR, itype, n2, t);
}
else
n2 = t;
n2 = force_gimple_operand_gsi (&gsi2, n2, true, NULL_TREE,
true, GSI_SAME_STMT);
if (i == fd->last_nonrect)
{
gcond *cond_stmt
= gimple_build_cond (fd->loops[i].cond_code, n1, n2,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
e = split_block (cur_bb, cond_stmt);
e->flags = EDGE_TRUE_VALUE;
ne = make_edge (cur_bb, next_bb, EDGE_FALSE_VALUE);
e->probability = profile_probability::likely ().guessed ();
ne->probability = e->probability.invert ();
gsi2 = gsi_after_labels (e->dest);
t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype,
fold_convert (itype, fd->loops[i].step), t);
t = fold_build2 (PLUS_EXPR, itype, t, n2);
t = fold_build2 (MINUS_EXPR, itype, t, n1);
tree step = fold_convert (itype, fd->loops[i].step);
if (TYPE_UNSIGNED (itype)
&& fd->loops[i].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, step));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (type, t);
t = fold_build2 (PLUS_EXPR, type,
counts[fd->last_nonrect], t);
t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
expand_omp_build_assign (&gsi2, counts[fd->last_nonrect], t);
e = make_edge (e->dest, next_bb, EDGE_FALLTHRU);
set_immediate_dominator (CDI_DOMINATORS, next_bb, cur_bb);
break;
}
e = split_block (cur_bb, last_stmt (cur_bb));
basic_block new_cur_bb = create_empty_bb (cur_bb);
add_bb_to_loop (new_cur_bb, cur_bb->loop_father);
gsi2 = gsi_after_labels (e->dest);
tree step = fold_convert (itype,
unshare_expr (fd->loops[i].step));
t = fold_build2 (PLUS_EXPR, itype, vs[i], step);
t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
expand_omp_build_assign (&gsi2, vs[i], t);
ne = split_block (e->dest, last_stmt (e->dest));
gsi2 = gsi_after_labels (ne->dest);
gcond *cond_stmt
= gimple_build_cond (fd->loops[i].cond_code, vs[i], n2,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
edge e3, e4;
if (next_bb == entry_bb)
{
e3 = find_edge (ne->dest, next_bb);
e3->flags = EDGE_FALSE_VALUE;
}
else
e3 = make_edge (ne->dest, next_bb, EDGE_FALSE_VALUE);
e4 = make_edge (ne->dest, new_cur_bb, EDGE_TRUE_VALUE);
e4->probability = profile_probability::likely ().guessed ();
e3->probability = e4->probability.invert ();
basic_block esrc = e->src;
make_edge (e->src, ne->dest, EDGE_FALLTHRU);
cur_bb = new_cur_bb;
basic_block latch_bb = next_bb;
next_bb = e->dest;
remove_edge (e);
set_immediate_dominator (CDI_DOMINATORS, ne->dest, esrc);
set_immediate_dominator (CDI_DOMINATORS, latch_bb, ne->dest);
set_immediate_dominator (CDI_DOMINATORS, cur_bb, ne->dest);
}
}
t = NULL_TREE;
for (i = fd->first_nonrect; i < fd->last_nonrect; i++)
if (!fd->loops[i].non_rect_referenced
&& fd->loops[i].m1 == NULL_TREE
&& fd->loops[i].m2 == NULL_TREE)
{
if (t == NULL_TREE)
t = counts[i];
else
t = fold_build2 (MULT_EXPR, type, t, counts[i]);
}
if (t)
{
t = fold_build2 (MULT_EXPR, type, counts[fd->last_nonrect], t);
expand_omp_build_assign (gsi, counts[fd->last_nonrect], t);
}
if (!rect_count_seen)
t = counts[fd->last_nonrect];
else
t = fold_build2 (MULT_EXPR, type, fd->loop.n2,
counts[fd->last_nonrect]);
expand_omp_build_assign (gsi, fd->loop.n2, t);
}
else if (fd->non_rect)
{
tree t = fd->loop.n2;
gcc_assert (TREE_CODE (t) == INTEGER_CST);
int non_rect_referenced = 0, non_rect = 0;
for (i = 0; i < fd->collapse; i++)
{
if ((i < fd->first_nonrect || i > fd->last_nonrect)
&& !integer_zerop (counts[i]))
t = fold_build2 (TRUNC_DIV_EXPR, type, t, counts[i]);
if (fd->loops[i].non_rect_referenced)
non_rect_referenced++;
if (fd->loops[i].m1 || fd->loops[i].m2)
non_rect++;
}
gcc_assert (non_rect == 1 && non_rect_referenced == 1);
counts[fd->last_nonrect] = t;
}
}
/* Helper function for expand_omp_{for_*,simd}. Generate code like:
T = V;
V3 = N31 + (T % count3) * STEP3;
T = T / count3;
V2 = N21 + (T % count2) * STEP2;
T = T / count2;
V1 = N11 + T * STEP1;
if this loop doesn't have an inner loop construct combined with it.
If it does have an inner loop construct combined with it and the
iteration count isn't known constant, store values from counts array
into its _looptemp_ temporaries instead.
For non-rectangular loops (between fd->first_nonrect and fd->last_nonrect
inclusive), use the count of all those loops together, and either
find quadratic etc. equation roots, or as a fallback, do:
COUNT = 0;
for (tmpi = N11; tmpi COND1 N12; tmpi += STEP1)
for (tmpj = M21 * tmpi + N21;
tmpj COND2 M22 * tmpi + N22; tmpj += STEP2)
{
int tmpk1 = M31 * tmpj + N31;
int tmpk2 = M32 * tmpj + N32;
if (tmpk1 COND3 tmpk2)
{
if (COND3 is <)
adj = STEP3 - 1;
else
adj = STEP3 + 1;
int temp = (adj + tmpk2 - tmpk1) / STEP3;
if (COUNT + temp > T)
{
V1 = tmpi;
V2 = tmpj;
V3 = tmpk1 + (T - COUNT) * STEP3;
goto done;
}
else
COUNT += temp;
}
}
done:;
but for the optional innermost or outermost rectangular loops that aren't
referenced by other loop expressions, keep doing the division/modulo.  */
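/* For illustration only, a hypothetical example: continuing the
   triangular nest used above (factor = 1, first_inner_iterations = 1),
   recovering the iterators for the combined index T = 7 evaluates
     t3 = first_inner_iterations - factor / 2.0 = 0.5
     c = (unsigned long long) ((sqrt (2.0 * factor * T + t3 * t3) - t3)
			       / factor)
       = (unsigned long long) (sqrt (14.25) - 0.5) = 3
     d = factor * c * (c - 1) / 2 + c * first_inner_iterations = 6
   so V1 = N11 + c * STEP1 = 3 and V2 = N21 + (T - d) * STEP2 = 1,
   which is indeed the iteration with zero-based number 7.  */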
static void
expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
tree *counts, tree *nonrect_bounds,
gimple *inner_stmt, tree startvar)
{
int i;
if (gimple_omp_for_combined_p (fd->for_stmt))
{
/* If fd->loop.n2 is constant, then no propagation of the counts
is needed; they are constant.  */
if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
return;
tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR
? gimple_omp_taskreg_clauses (inner_stmt)
: gimple_omp_for_clauses (inner_stmt);
/* First two _looptemp_ clauses are for istart/iend, counts[0]
isn't supposed to be handled, as the inner loop doesn't
use it. */
tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
int count = 0;
if (fd->non_rect
&& fd->last_nonrect == fd->first_nonrect + 1
&& !TYPE_UNSIGNED (TREE_TYPE (fd->loops[fd->last_nonrect].v)))
count = 4;
for (i = 0; i < fd->collapse + count; i++)
{
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
if (i)
{
tree tem = OMP_CLAUSE_DECL (innerc);
tree t;
if (i < fd->collapse)
t = counts[i];
else
switch (i - fd->collapse)
{
case 0: t = counts[0]; break;
case 1: t = fd->first_inner_iterations; break;
case 2: t = fd->factor; break;
case 3: t = fd->adjn1; break;
default: gcc_unreachable ();
}
t = fold_convert (TREE_TYPE (tem), t);
t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
false, GSI_CONTINUE_LINKING);
gassign *stmt = gimple_build_assign (tem, t);
gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
}
}
return;
}
tree type = TREE_TYPE (fd->loop.v);
tree tem = create_tmp_reg (type, ".tem");
gassign *stmt = gimple_build_assign (tem, startvar);
gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
for (i = fd->collapse - 1; i >= 0; i--)
{
tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
itype = vtype;
if (POINTER_TYPE_P (vtype))
itype = signed_type_for (vtype);
if (i != 0 && (i != fd->last_nonrect || fd->first_nonrect))
t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
else
t = tem;
if (i == fd->last_nonrect)
{
t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
tree stopval = t;
tree idx = create_tmp_reg (type, ".count");
expand_omp_build_assign (gsi, idx,
build_zero_cst (type), true);
basic_block bb_triang = NULL, bb_triang_dom = NULL;
if (fd->first_nonrect + 1 == fd->last_nonrect
&& (TREE_CODE (fd->loop.n2) == INTEGER_CST
|| fd->first_inner_iterations)
&& (optab_handler (sqrt_optab, TYPE_MODE (double_type_node))
!= CODE_FOR_nothing))
{
tree outer_n1 = fd->adjn1 ? fd->adjn1 : fd->loops[i - 1].n1;
tree itype = TREE_TYPE (fd->loops[i].v);
tree first_inner_iterations = fd->first_inner_iterations;
tree factor = fd->factor;
gcond *cond_stmt
= gimple_build_cond (NE_EXPR, factor,
build_zero_cst (TREE_TYPE (factor)),
NULL_TREE, NULL_TREE);
gsi_insert_after (gsi, cond_stmt, GSI_CONTINUE_LINKING);
edge e = split_block (gsi_bb (*gsi), cond_stmt);
basic_block bb0 = e->src;
e->flags = EDGE_TRUE_VALUE;
e->probability = profile_probability::likely ();
bb_triang_dom = bb0;
*gsi = gsi_after_labels (e->dest);
tree slltype = long_long_integer_type_node;
tree ulltype = long_long_unsigned_type_node;
tree stopvalull = fold_convert (ulltype, stopval);
stopvalull
= force_gimple_operand_gsi (gsi, stopvalull, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
first_inner_iterations
= fold_convert (slltype, first_inner_iterations);
first_inner_iterations
= force_gimple_operand_gsi (gsi, first_inner_iterations, true,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
factor = fold_convert (slltype, factor);
factor
= force_gimple_operand_gsi (gsi, factor, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
tree first_inner_iterationsd
= fold_build1 (FLOAT_EXPR, double_type_node,
first_inner_iterations);
first_inner_iterationsd
= force_gimple_operand_gsi (gsi, first_inner_iterationsd, true,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
tree factord = fold_build1 (FLOAT_EXPR, double_type_node,
factor);
factord = force_gimple_operand_gsi (gsi, factord, true,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
tree stopvald = fold_build1 (FLOAT_EXPR, double_type_node,
stopvalull);
stopvald = force_gimple_operand_gsi (gsi, stopvald, true,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
/* Temporarily disable flag_rounding_math; the values will be
decimal numbers divided by 2, and worst-case imprecision
due to too large values ought to be caught later by the
checks for the fallback.  */
int save_flag_rounding_math = flag_rounding_math;
flag_rounding_math = 0;
t = fold_build2 (RDIV_EXPR, double_type_node, factord,
build_real (double_type_node, dconst2));
tree t3 = fold_build2 (MINUS_EXPR, double_type_node,
first_inner_iterationsd, t);
t3 = force_gimple_operand_gsi (gsi, t3, true, NULL_TREE, false,
GSI_CONTINUE_LINKING);
t = fold_build2 (MULT_EXPR, double_type_node, factord,
build_real (double_type_node, dconst2));
t = fold_build2 (MULT_EXPR, double_type_node, t, stopvald);
t = fold_build2 (PLUS_EXPR, double_type_node, t,
fold_build2 (MULT_EXPR, double_type_node,
t3, t3));
flag_rounding_math = save_flag_rounding_math;
t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false,
GSI_CONTINUE_LINKING);
if (flag_exceptions
&& cfun->can_throw_non_call_exceptions
&& operation_could_trap_p (LT_EXPR, true, false, NULL_TREE))
{
tree tem = fold_build2 (LT_EXPR, boolean_type_node, t,
build_zero_cst (double_type_node));
tem = force_gimple_operand_gsi (gsi, tem, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
cond_stmt = gimple_build_cond (NE_EXPR, tem,
boolean_false_node,
NULL_TREE, NULL_TREE);
}
else
cond_stmt
= gimple_build_cond (LT_EXPR, t,
build_zero_cst (double_type_node),
NULL_TREE, NULL_TREE);
gsi_insert_after (gsi, cond_stmt, GSI_CONTINUE_LINKING);
e = split_block (gsi_bb (*gsi), cond_stmt);
basic_block bb1 = e->src;
e->flags = EDGE_FALSE_VALUE;
e->probability = profile_probability::very_likely ();
*gsi = gsi_after_labels (e->dest);
gcall *call = gimple_build_call_internal (IFN_SQRT, 1, t);
tree sqrtr = create_tmp_var (double_type_node);
gimple_call_set_lhs (call, sqrtr);
gsi_insert_after (gsi, call, GSI_CONTINUE_LINKING);
t = fold_build2 (MINUS_EXPR, double_type_node, sqrtr, t3);
t = fold_build2 (RDIV_EXPR, double_type_node, t, factord);
t = fold_build1 (FIX_TRUNC_EXPR, ulltype, t);
tree c = create_tmp_var (ulltype);
tree d = create_tmp_var (ulltype);
expand_omp_build_assign (gsi, c, t, true);
t = fold_build2 (MINUS_EXPR, ulltype, c,
build_one_cst (ulltype));
t = fold_build2 (MULT_EXPR, ulltype, c, t);
t = fold_build2 (RSHIFT_EXPR, ulltype, t, integer_one_node);
t = fold_build2 (MULT_EXPR, ulltype,
fold_convert (ulltype, fd->factor), t);
tree t2
= fold_build2 (MULT_EXPR, ulltype, c,
fold_convert (ulltype,
fd->first_inner_iterations));
t = fold_build2 (PLUS_EXPR, ulltype, t, t2);
expand_omp_build_assign (gsi, d, t, true);
t = fold_build2 (MULT_EXPR, ulltype,
fold_convert (ulltype, fd->factor), c);
t = fold_build2 (PLUS_EXPR, ulltype,
t, fold_convert (ulltype,
fd->first_inner_iterations));
t2 = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false,
GSI_CONTINUE_LINKING);
cond_stmt = gimple_build_cond (GE_EXPR, stopvalull, d,
NULL_TREE, NULL_TREE);
gsi_insert_after (gsi, cond_stmt, GSI_CONTINUE_LINKING);
e = split_block (gsi_bb (*gsi), cond_stmt);
basic_block bb2 = e->src;
e->flags = EDGE_TRUE_VALUE;
e->probability = profile_probability::very_likely ();
*gsi = gsi_after_labels (e->dest);
t = fold_build2 (PLUS_EXPR, ulltype, d, t2);
t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false,
GSI_CONTINUE_LINKING);
cond_stmt = gimple_build_cond (GE_EXPR, stopvalull, t,
NULL_TREE, NULL_TREE);
gsi_insert_after (gsi, cond_stmt, GSI_CONTINUE_LINKING);
e = split_block (gsi_bb (*gsi), cond_stmt);
basic_block bb3 = e->src;
e->flags = EDGE_FALSE_VALUE;
e->probability = profile_probability::very_likely ();
*gsi = gsi_after_labels (e->dest);
t = fold_convert (itype, c);
t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i - 1].step);
t = fold_build2 (PLUS_EXPR, itype, outer_n1, t);
t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, false,
GSI_CONTINUE_LINKING);
expand_omp_build_assign (gsi, fd->loops[i - 1].v, t, true);
t2 = fold_build2 (MINUS_EXPR, ulltype, stopvalull, d);
t2 = fold_convert (itype, t2);
t2 = fold_build2 (MULT_EXPR, itype, t2, fd->loops[i].step);
t2 = fold_build2 (PLUS_EXPR, itype, t2, fd->loops[i].n1);
if (fd->loops[i].m1)
{
t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].m1);
t2 = fold_build2 (PLUS_EXPR, itype, t2, t);
}
expand_omp_build_assign (gsi, fd->loops[i].v, t2, true);
e = split_block (gsi_bb (*gsi), gsi_stmt (*gsi));
bb_triang = e->src;
*gsi = gsi_after_labels (e->dest);
remove_edge (e);
e = make_edge (bb1, gsi_bb (*gsi), EDGE_TRUE_VALUE);
e->probability = profile_probability::very_unlikely ();
e = make_edge (bb2, gsi_bb (*gsi), EDGE_FALSE_VALUE);
e->probability = profile_probability::very_unlikely ();
e = make_edge (bb3, gsi_bb (*gsi), EDGE_TRUE_VALUE);
e->probability = profile_probability::very_unlikely ();
basic_block bb4 = create_empty_bb (bb0);
add_bb_to_loop (bb4, bb0->loop_father);
e = make_edge (bb0, bb4, EDGE_FALSE_VALUE);
e->probability = profile_probability::unlikely ();
make_edge (bb4, gsi_bb (*gsi), EDGE_FALLTHRU);
set_immediate_dominator (CDI_DOMINATORS, bb4, bb0);
set_immediate_dominator (CDI_DOMINATORS, gsi_bb (*gsi), bb0);
gimple_stmt_iterator gsi2 = gsi_after_labels (bb4);
t2 = fold_build2 (TRUNC_DIV_EXPR, type,
counts[i], counts[i - 1]);
t2 = force_gimple_operand_gsi (&gsi2, t2, true, NULL_TREE, false,
GSI_CONTINUE_LINKING);
t = fold_build2 (TRUNC_MOD_EXPR, type, stopval, t2);
t2 = fold_build2 (TRUNC_DIV_EXPR, type, stopval, t2);
t = fold_convert (itype, t);
t2 = fold_convert (itype, t2);
t = fold_build2 (MULT_EXPR, itype, t,
fold_convert (itype, fd->loops[i].step));
t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
t2 = fold_build2 (MULT_EXPR, itype, t2,
fold_convert (itype, fd->loops[i - 1].step));
t2 = fold_build2 (PLUS_EXPR, itype, fd->loops[i - 1].n1, t2);
t2 = force_gimple_operand_gsi (&gsi2, t2, false, NULL_TREE,
false, GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (fd->loops[i - 1].v, t2);
gsi_insert_after (&gsi2, stmt, GSI_CONTINUE_LINKING);
if (fd->loops[i].m1)
{
t2 = fold_build2 (MULT_EXPR, itype, fd->loops[i].m1,
fd->loops[i - 1].v);
t = fold_build2 (PLUS_EXPR, itype, t, t2);
}
t = force_gimple_operand_gsi (&gsi2, t, false, NULL_TREE,
false, GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (fd->loops[i].v, t);
gsi_insert_after (&gsi2, stmt, GSI_CONTINUE_LINKING);
}
/* Fallback implementation.  Evaluate the loops between
fd->first_nonrect and fd->last_nonrect (inclusive) at
runtime using temporaries instead of the original iteration
variables; in the body just bump the counter and compare
it with the desired value.  */
gimple_stmt_iterator gsi2 = *gsi;
basic_block entry_bb = gsi_bb (gsi2);
edge e = split_block (entry_bb, gsi_stmt (gsi2));
e = split_block (e->dest, (gimple *) NULL);
basic_block dom_bb = NULL;
basic_block cur_bb = e->src;
basic_block next_bb = e->dest;
entry_bb = e->dest;
*gsi = gsi_after_labels (entry_bb);
tree *vs = XALLOCAVEC (tree, fd->last_nonrect);
tree n1 = NULL_TREE, n2 = NULL_TREE;
memset (vs, 0, fd->last_nonrect * sizeof (tree));
for (int j = fd->first_nonrect; j <= fd->last_nonrect; j++)
{
tree itype = TREE_TYPE (fd->loops[j].v);
bool rect_p = (fd->loops[j].m1 == NULL_TREE
&& fd->loops[j].m2 == NULL_TREE
&& !fd->loops[j].non_rect_referenced);
gsi2 = gsi_after_labels (cur_bb);
t = fold_convert (itype, unshare_expr (fd->loops[j].n1));
if (fd->loops[j].m1)
{
n1 = fold_convert (itype, unshare_expr (fd->loops[j].m1));
n1 = fold_build2 (MULT_EXPR, itype,
vs[j - fd->loops[j].outer], n1);
n1 = fold_build2 (PLUS_EXPR, itype, n1, t);
}
else if (rect_p)
n1 = build_zero_cst (type);
else
n1 = t;
n1 = force_gimple_operand_gsi (&gsi2, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
if (j < fd->last_nonrect)
{
vs[j] = create_tmp_reg (rect_p ? type : itype, ".it");
expand_omp_build_assign (&gsi2, vs[j], n1);
}
t = fold_convert (itype, unshare_expr (fd->loops[j].n2));
if (fd->loops[j].m2)
{
n2 = fold_convert (itype, unshare_expr (fd->loops[j].m2));
n2 = fold_build2 (MULT_EXPR, itype,
vs[j - fd->loops[j].outer], n2);
n2 = fold_build2 (PLUS_EXPR, itype, n2, t);
}
else if (rect_p)
n2 = counts[j];
else
n2 = t;
n2 = force_gimple_operand_gsi (&gsi2, n2, true, NULL_TREE,
true, GSI_SAME_STMT);
if (j == fd->last_nonrect)
{
gcond *cond_stmt
= gimple_build_cond (fd->loops[j].cond_code, n1, n2,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
e = split_block (cur_bb, cond_stmt);
e->flags = EDGE_TRUE_VALUE;
edge ne = make_edge (cur_bb, next_bb, EDGE_FALSE_VALUE);
e->probability = profile_probability::likely ().guessed ();
ne->probability = e->probability.invert ();
gsi2 = gsi_after_labels (e->dest);
t = build_int_cst (itype, (fd->loops[j].cond_code == LT_EXPR
? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype,
fold_convert (itype, fd->loops[j].step), t);
t = fold_build2 (PLUS_EXPR, itype, t, n2);
t = fold_build2 (MINUS_EXPR, itype, t, n1);
tree step = fold_convert (itype, fd->loops[j].step);
if (TYPE_UNSIGNED (itype)
&& fd->loops[j].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, step));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (type, t);
t = fold_build2 (PLUS_EXPR, type, idx, t);
t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
e = make_edge (e->dest, next_bb, EDGE_FALLTHRU);
set_immediate_dominator (CDI_DOMINATORS, next_bb, cur_bb);
cond_stmt
= gimple_build_cond (LE_EXPR, t, stopval, NULL_TREE,
NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
e = split_block (gsi_bb (gsi2), cond_stmt);
e->flags = EDGE_TRUE_VALUE;
e->probability = profile_probability::likely ().guessed ();
ne = make_edge (e->src, entry_bb, EDGE_FALSE_VALUE);
ne->probability = e->probability.invert ();
gsi2 = gsi_after_labels (e->dest);
expand_omp_build_assign (&gsi2, idx, t);
set_immediate_dominator (CDI_DOMINATORS, entry_bb, dom_bb);
break;
}
e = split_block (cur_bb, last_stmt (cur_bb));
basic_block new_cur_bb = create_empty_bb (cur_bb);
add_bb_to_loop (new_cur_bb, cur_bb->loop_father);
gsi2 = gsi_after_labels (e->dest);
if (rect_p)
t = fold_build2 (PLUS_EXPR, type, vs[j],
build_one_cst (type));
else
{
tree step
= fold_convert (itype, unshare_expr (fd->loops[j].step));
t = fold_build2 (PLUS_EXPR, itype, vs[j], step);
}
t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
expand_omp_build_assign (&gsi2, vs[j], t);
edge ne = split_block (e->dest, last_stmt (e->dest));
gsi2 = gsi_after_labels (ne->dest);
gcond *cond_stmt;
if (next_bb == entry_bb)
/* No need to actually check the outermost condition. */
cond_stmt
= gimple_build_cond (EQ_EXPR, boolean_true_node,
boolean_true_node,
NULL_TREE, NULL_TREE);
else
cond_stmt
= gimple_build_cond (rect_p ? LT_EXPR
: fd->loops[j].cond_code,
vs[j], n2, NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi2, cond_stmt, GSI_SAME_STMT);
edge e3, e4;
if (next_bb == entry_bb)
{
e3 = find_edge (ne->dest, next_bb);
e3->flags = EDGE_FALSE_VALUE;
dom_bb = ne->dest;
}
else
e3 = make_edge (ne->dest, next_bb, EDGE_FALSE_VALUE);
e4 = make_edge (ne->dest, new_cur_bb, EDGE_TRUE_VALUE);
e4->probability = profile_probability::likely ().guessed ();
e3->probability = e4->probability.invert ();
basic_block esrc = e->src;
make_edge (e->src, ne->dest, EDGE_FALLTHRU);
cur_bb = new_cur_bb;
basic_block latch_bb = next_bb;
next_bb = e->dest;
remove_edge (e);
set_immediate_dominator (CDI_DOMINATORS, ne->dest, esrc);
set_immediate_dominator (CDI_DOMINATORS, latch_bb, ne->dest);
set_immediate_dominator (CDI_DOMINATORS, cur_bb, ne->dest);
}
for (int j = fd->last_nonrect; j >= fd->first_nonrect; j--)
{
tree itype = TREE_TYPE (fd->loops[j].v);
bool rect_p = (fd->loops[j].m1 == NULL_TREE
&& fd->loops[j].m2 == NULL_TREE
&& !fd->loops[j].non_rect_referenced);
if (j == fd->last_nonrect)
{
t = fold_build2 (MINUS_EXPR, type, stopval, idx);
t = fold_convert (itype, t);
tree t2
= fold_convert (itype, unshare_expr (fd->loops[j].step));
t = fold_build2 (MULT_EXPR, itype, t, t2);
t = fold_build2 (PLUS_EXPR, itype, n1, t);
}
else if (rect_p)
{
t = fold_convert (itype, vs[j]);
t = fold_build2 (MULT_EXPR, itype, t,
fold_convert (itype, fd->loops[j].step));
if (POINTER_TYPE_P (vtype))
t = fold_build_pointer_plus (fd->loops[j].n1, t);
else
t = fold_build2 (PLUS_EXPR, itype, fd->loops[j].n1, t);
}
else
t = vs[j];
t = force_gimple_operand_gsi (gsi, t, false,
NULL_TREE, true,
GSI_SAME_STMT);
stmt = gimple_build_assign (fd->loops[j].v, t);
gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
}
if (gsi_end_p (*gsi))
*gsi = gsi_last_bb (gsi_bb (*gsi));
else
gsi_prev (gsi);
if (bb_triang)
{
e = split_block (gsi_bb (*gsi), gsi_stmt (*gsi));
make_edge (bb_triang, e->dest, EDGE_FALLTHRU);
*gsi = gsi_after_labels (e->dest);
if (!gsi_end_p (*gsi))
gsi_insert_before (gsi, gimple_build_nop (), GSI_NEW_STMT);
set_immediate_dominator (CDI_DOMINATORS, e->dest, bb_triang_dom);
}
}
else
{
t = fold_convert (itype, t);
t = fold_build2 (MULT_EXPR, itype, t,
fold_convert (itype, fd->loops[i].step));
if (POINTER_TYPE_P (vtype))
t = fold_build_pointer_plus (fd->loops[i].n1, t);
else
t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
t = force_gimple_operand_gsi (gsi, t,
DECL_P (fd->loops[i].v)
&& TREE_ADDRESSABLE (fd->loops[i].v),
NULL_TREE, false,
GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (fd->loops[i].v, t);
gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
}
if (i != 0 && (i != fd->last_nonrect || fd->first_nonrect))
{
t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
false, GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (tem, t);
gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
}
if (i == fd->last_nonrect)
i = fd->first_nonrect;
}
if (fd->non_rect)
for (i = 0; i <= fd->last_nonrect; i++)
if (fd->loops[i].m2)
{
tree itype = TREE_TYPE (fd->loops[i].v);
tree t = fold_convert (itype, unshare_expr (fd->loops[i].m2));
t = fold_build2 (MULT_EXPR, itype,
fd->loops[i - fd->loops[i].outer].v, t);
t = fold_build2 (PLUS_EXPR, itype, t,
fold_convert (itype,
unshare_expr (fd->loops[i].n2)));
nonrect_bounds[i] = create_tmp_reg (itype, ".bound");
t = force_gimple_operand_gsi (gsi, t, false,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (nonrect_bounds[i], t);
gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
}
}
/* Helper function for expand_omp_for_*. Generate code like:
L10:
V3 += STEP3;
if (V3 cond3 N32) goto BODY_BB; else goto L11;
L11:
V3 = N31;
V2 += STEP2;
if (V2 cond2 N22) goto BODY_BB; else goto L12;
L12:
V2 = N21;
V1 += STEP1;
goto BODY_BB;
For non-rectangular loops, use temporaries stored in nonrect_bounds
for the upper bounds if M?2 multiplier is present. Given e.g.
for (V1 = N11; V1 cond1 N12; V1 += STEP1)
for (V2 = N21; V2 cond2 N22; V2 += STEP2)
for (V3 = N31; V3 cond3 N32; V3 += STEP3)
for (V4 = N41 + M41 * V2; V4 cond4 N42 + M42 * V2; V4 += STEP4)
do:
L10:
V4 += STEP4;
if (V4 cond4 NONRECT_BOUND4) goto BODY_BB; else goto L11;
L11:
V4 = N41 + M41 * V2; // This can be left out if the loop
// refers to the immediate parent loop
V3 += STEP3;
if (V3 cond3 N32) goto BODY_BB; else goto L12;
L12:
V3 = N31;
V2 += STEP2;
if (V2 cond2 N22) goto L120; else goto L13;
L120:
V4 = N41 + M41 * V2;
NONRECT_BOUND4 = N42 + M42 * V2;
if (V4 cond4 NONRECT_BOUND4) goto BODY_BB; else goto L12;
L13:
V2 = N21;
V1 += STEP1;
goto L120; */
static basic_block
extract_omp_for_update_vars (struct omp_for_data *fd, tree *nonrect_bounds,
basic_block cont_bb, basic_block body_bb)
{
basic_block last_bb, bb, collapse_bb = NULL;
int i;
gimple_stmt_iterator gsi;
edge e;
tree t;
gimple *stmt;
last_bb = cont_bb;
for (i = fd->collapse - 1; i >= 0; i--)
{
tree vtype = TREE_TYPE (fd->loops[i].v);
bb = create_empty_bb (last_bb);
add_bb_to_loop (bb, last_bb->loop_father);
gsi = gsi_start_bb (bb);
if (i < fd->collapse - 1)
{
e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
e->probability
= profile_probability::guessed_always ().apply_scale (1, 8);
struct omp_for_data_loop *l = &fd->loops[i + 1];
if (l->m1 == NULL_TREE || l->outer != 1)
{
t = l->n1;
if (l->m1)
{
tree t2
= fold_build2 (MULT_EXPR, TREE_TYPE (t),
fd->loops[i + 1 - l->outer].v, l->m1);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (t), t2, t);
}
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (l->v)
&& TREE_ADDRESSABLE (l->v),
NULL_TREE, false,
GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (l->v, t);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
}
}
else
collapse_bb = bb;
set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
if (POINTER_TYPE_P (vtype))
t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
else
t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (fd->loops[i].v)
&& TREE_ADDRESSABLE (fd->loops[i].v),
NULL_TREE, false, GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (fd->loops[i].v, t);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
if (fd->loops[i].non_rect_referenced)
{
basic_block update_bb = NULL, prev_bb = NULL;
for (int j = i + 1; j <= fd->last_nonrect; j++)
if (j - fd->loops[j].outer == i)
{
tree n1, n2;
struct omp_for_data_loop *l = &fd->loops[j];
basic_block this_bb = create_empty_bb (last_bb);
add_bb_to_loop (this_bb, last_bb->loop_father);
gimple_stmt_iterator gsi2 = gsi_start_bb (this_bb);
if (prev_bb)
{
e = make_edge (prev_bb, this_bb, EDGE_TRUE_VALUE);
e->probability
= profile_probability::guessed_always ().apply_scale (7,
8);
set_immediate_dominator (CDI_DOMINATORS, this_bb, prev_bb);
}
if (l->m1)
{
t = fold_build2 (MULT_EXPR, TREE_TYPE (l->m1), l->m1,
fd->loops[i].v);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (l->v), t, l->n1);
n1 = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
false,
GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (l->v, n1);
gsi_insert_after (&gsi2, stmt, GSI_CONTINUE_LINKING);
n1 = l->v;
}
else
n1 = force_gimple_operand_gsi (&gsi2, l->n1, true,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
if (l->m2)
{
t = fold_build2 (MULT_EXPR, TREE_TYPE (l->m2), l->m2,
fd->loops[i].v);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (nonrect_bounds[j]),
t, unshare_expr (l->n2));
n2 = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
false,
GSI_CONTINUE_LINKING);
stmt = gimple_build_assign (nonrect_bounds[j], n2);
gsi_insert_after (&gsi2, stmt, GSI_CONTINUE_LINKING);
n2 = nonrect_bounds[j];
}
else
n2 = force_gimple_operand_gsi (&gsi2, unshare_expr (l->n2),
true, NULL_TREE, false,
GSI_CONTINUE_LINKING);
gcond *cond_stmt
= gimple_build_cond (l->cond_code, n1, n2,
NULL_TREE, NULL_TREE);
gsi_insert_after (&gsi2, cond_stmt, GSI_CONTINUE_LINKING);
if (update_bb == NULL)
update_bb = this_bb;
e = make_edge (this_bb, bb, EDGE_FALSE_VALUE);
e->probability
= profile_probability::guessed_always ().apply_scale (1, 8);
if (prev_bb == NULL)
set_immediate_dominator (CDI_DOMINATORS, this_bb, bb);
prev_bb = this_bb;
}
e = make_edge (prev_bb, body_bb, EDGE_TRUE_VALUE);
e->probability
= profile_probability::guessed_always ().apply_scale (7, 8);
body_bb = update_bb;
}
if (i > 0)
{
if (fd->loops[i].m2)
t = nonrect_bounds[i];
else
t = unshare_expr (fd->loops[i].n2);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
tree v = fd->loops[i].v;
if (DECL_P (v) && TREE_ADDRESSABLE (v))
v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
stmt = gimple_build_cond_empty (t);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
if (walk_tree (gimple_cond_lhs_ptr (as_a <gcond *> (stmt)),
expand_omp_regimplify_p, NULL, NULL)
|| walk_tree (gimple_cond_rhs_ptr (as_a <gcond *> (stmt)),
expand_omp_regimplify_p, NULL, NULL))
gimple_regimplify_operands (stmt, &gsi);
e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
e->probability = profile_probability::guessed_always ().apply_scale (7, 8);
}
else
make_edge (bb, body_bb, EDGE_FALLTHRU);
set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
last_bb = bb;
}
return collapse_bb;
}
/* Expand #pragma omp ordered depend(source). */
static void
expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
tree *counts, location_t loc)
{
enum built_in_function source_ix
= fd->iter_type == long_integer_type_node
? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST;
gimple *g
= gimple_build_call (builtin_decl_explicit (source_ix), 1,
build_fold_addr_expr (counts[fd->ordered]));
gimple_set_location (g, loc);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
}
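/* For illustration only, a hypothetical example: for a long
   iteration type the expansion above turns
     #pragma omp ordered depend(source)
   into a call along the lines of
     GOMP_doacross_post (&.orditera);
   where .orditera is the iteration-vector array created by
   expand_omp_ordered_source_sink below.  */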
/* Expand a single depend from #pragma omp ordered depend(sink:...). */
static void
expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
tree *counts, tree c, location_t loc)
{
auto_vec<tree, 10> args;
enum built_in_function sink_ix
= fd->iter_type == long_integer_type_node
? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT;
tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE;
int i;
gimple_stmt_iterator gsi2 = *gsi;
bool warned_step = false;
for (i = 0; i < fd->ordered; i++)
{
tree step = NULL_TREE;
off = TREE_PURPOSE (deps);
if (TREE_CODE (off) == TRUNC_DIV_EXPR)
{
step = TREE_OPERAND (off, 1);
off = TREE_OPERAND (off, 0);
}
if (!integer_zerop (off))
{
gcc_assert (fd->loops[i].cond_code == LT_EXPR
|| fd->loops[i].cond_code == GT_EXPR);
bool forward = fd->loops[i].cond_code == LT_EXPR;
if (step)
{
/* Non-simple Fortran DO loops.  If step is variable,
we don't know even the direction at compile time, so
we can't warn.  */
if (TREE_CODE (step) != INTEGER_CST)
break;
forward = tree_int_cst_sgn (step) != -1;
}
if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
warning_at (loc, 0, "%<depend%> clause with %<sink%> modifier "
"waiting for lexically later iteration");
break;
}
deps = TREE_CHAIN (deps);
}
/* If all offsets corresponding to the collapsed loops are zero,
this depend clause can be ignored. FIXME: but there is still a
flush needed. We need to emit one __sync_synchronize () for it
though (perhaps conditionally)? Solve this together with the
conservative dependence folding optimization.
if (i >= fd->collapse)
return; */
deps = OMP_CLAUSE_DECL (c);
gsi_prev (&gsi2);
edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2));
edge e2 = split_block_after_labels (e1->dest);
gsi2 = gsi_after_labels (e1->dest);
*gsi = gsi_last_bb (e1->src);
for (i = 0; i < fd->ordered; i++)
{
tree itype = TREE_TYPE (fd->loops[i].v);
tree step = NULL_TREE;
tree orig_off = NULL_TREE;
if (POINTER_TYPE_P (itype))
itype = sizetype;
if (i)
deps = TREE_CHAIN (deps);
off = TREE_PURPOSE (deps);
if (TREE_CODE (off) == TRUNC_DIV_EXPR)
{
step = TREE_OPERAND (off, 1);
off = TREE_OPERAND (off, 0);
gcc_assert (fd->loops[i].cond_code == LT_EXPR
&& integer_onep (fd->loops[i].step)
&& !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)));
}
tree s = fold_convert_loc (loc, itype, step ? step : fd->loops[i].step);
if (step)
{
off = fold_convert_loc (loc, itype, off);
orig_off = off;
off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
}
if (integer_zerop (off))
t = boolean_true_node;
else
{
tree a;
tree co = fold_convert_loc (loc, itype, off);
if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
{
if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
co = fold_build1_loc (loc, NEGATE_EXPR, itype, co);
a = fold_build2_loc (loc, POINTER_PLUS_EXPR,
TREE_TYPE (fd->loops[i].v), fd->loops[i].v,
co);
}
else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
fd->loops[i].v, co);
else
a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
fd->loops[i].v, co);
if (step)
{
tree t1, t2;
if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
fd->loops[i].n1);
else
t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
fd->loops[i].n2);
if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
fd->loops[i].n2);
else
t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
fd->loops[i].n1);
t = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
step, build_int_cst (TREE_TYPE (step), 0));
if (TREE_CODE (step) != INTEGER_CST)
{
t1 = unshare_expr (t1);
t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t2 = unshare_expr (t2);
t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
}
t = fold_build3_loc (loc, COND_EXPR, boolean_type_node,
t, t2, t1);
}
else if (fd->loops[i].cond_code == LT_EXPR)
{
if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
fd->loops[i].n1);
else
t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
fd->loops[i].n2);
}
else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a,
fd->loops[i].n2);
else
t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a,
fd->loops[i].n1);
}
if (cond)
cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t);
else
cond = t;
off = fold_convert_loc (loc, itype, off);
if (step
|| (fd->loops[i].cond_code == LT_EXPR
? !integer_onep (fd->loops[i].step)
: !integer_minus_onep (fd->loops[i].step)))
{
if (step == NULL_TREE
&& TYPE_UNSIGNED (itype)
&& fd->loops[i].cond_code == GT_EXPR)
t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off,
fold_build1_loc (loc, NEGATE_EXPR, itype,
s));
else
t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype,
orig_off ? orig_off : off, s);
t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t,
build_int_cst (itype, 0));
if (integer_zerop (t) && !warned_step)
{
warning_at (loc, 0, "%<depend%> clause with %<sink%> modifier "
"refers to iteration never in the iteration "
"space");
warned_step = true;
}
cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node,
cond, t);
}
if (i <= fd->collapse - 1 && fd->collapse > 1)
t = fd->loop.v;
else if (counts[i])
t = counts[i];
else
{
t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
fd->loops[i].v, fd->loops[i].n1);
t = fold_convert_loc (loc, fd->iter_type, t);
}
if (step)
/* We have already divided off by step earlier.  */;
else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off,
fold_build1_loc (loc, NEGATE_EXPR, itype,
s));
else
off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
off = fold_build1_loc (loc, NEGATE_EXPR, itype, off);
off = fold_convert_loc (loc, fd->iter_type, off);
if (i <= fd->collapse - 1 && fd->collapse > 1)
{
if (i)
off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff,
off);
if (i < fd->collapse - 1)
{
coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off,
counts[i]);
continue;
}
}
off = unshare_expr (off);
t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off);
t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
args.safe_push (t);
}
gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args);
gimple_set_location (g, loc);
gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
cond = unshare_expr (cond);
cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false,
GSI_CONTINUE_LINKING);
gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT);
edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
e3->probability = profile_probability::guessed_always ().apply_scale (1, 8);
e1->probability = e3->probability.invert ();
e1->flags = EDGE_TRUE_VALUE;
set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
*gsi = gsi_after_labels (e2->dest);
}
/* Expand all #pragma omp ordered depend(source) and
#pragma omp ordered depend(sink:...) constructs in the current
#pragma omp for ordered(n) region. */
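/* For illustration (an assumed user-code example, not taken from this
   file): a doacross loop such as

     #pragma omp for ordered(2)
     for (i = 0; i < n; i++)
       for (j = 0; j < m; j++)
         {
           #pragma omp ordered depend(sink: i - 1, j) depend(sink: i, j - 1)
           a[i][j] = f (a[i - 1][j], a[i][j - 1]);
           #pragma omp ordered depend(source)
         }

   has each sink clause expanded into a conditional GOMP_doacross_wait
   call and the source clause into a GOMP_doacross_post call. */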
static void
expand_omp_ordered_source_sink (struct omp_region *region,
struct omp_for_data *fd, tree *counts,
basic_block cont_bb)
{
struct omp_region *inner;
int i;
for (i = fd->collapse - 1; i < fd->ordered; i++)
if (i == fd->collapse - 1 && fd->collapse > 1)
counts[i] = NULL_TREE;
else if (i >= fd->collapse && !cont_bb)
counts[i] = build_zero_cst (fd->iter_type);
else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))
&& integer_onep (fd->loops[i].step))
counts[i] = NULL_TREE;
else
counts[i] = create_tmp_var (fd->iter_type, ".orditer");
tree atype
= build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1);
counts[fd->ordered] = create_tmp_var (atype, ".orditera");
TREE_ADDRESSABLE (counts[fd->ordered]) = 1;
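/* Illustrative note (assumption, not in the original source): with
   e.g. collapse(1) ordered(3), the loop above leaves counts[0..2] as
   the per-dimension iteration counters (possibly NULL_TREE for unit
   steps) and counts[3] as this ".orditera" array of
   fd->ordered - fd->collapse + 1 == 3 elements, used as the current
   counter block when expanding depend(source)/depend(sink). */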
for (inner = region->inner; inner; inner = inner->next)
if (inner->type == GIMPLE_OMP_ORDERED)
{
gomp_ordered *ord_stmt = inner->ord_stmt;
gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt);
location_t loc = gimple_location (ord_stmt);
tree c;
for (c = gimple_omp_ordered_clauses (ord_stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
break;
if (c)
expand_omp_ordered_source (&gsi, fd, counts, loc);
for (c = gimple_omp_ordered_clauses (ord_stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
expand_omp_ordered_sink (&gsi, fd, counts, c, loc);
gsi_remove (&gsi, true);
}
}
/* Wrap the body into fd->ordered - fd->collapse loops that aren't
collapsed. */
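/* For instance (an illustrative assumption): with collapse(1) ordered(2),
   only the outermost loop is handled by the runtime schedule; the
   single remaining ordered dimension is rebuilt here as an explicit
   loop around BODY_BB, storing its counter into the ".orditera" array
   on every iteration. */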
static basic_block
expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
basic_block cont_bb, basic_block body_bb,
bool ordered_lastprivate)
{
if (fd->ordered == fd->collapse)
return cont_bb;
if (!cont_bb)
{
gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
for (int i = fd->collapse; i < fd->ordered; i++)
{
tree type = TREE_TYPE (fd->loops[i].v);
tree n1 = fold_convert (type, fd->loops[i].n1);
expand_omp_build_assign (&gsi, fd->loops[i].v, n1);
tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
size_int (i - fd->collapse + 1),
NULL_TREE, NULL_TREE);
expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
}
return NULL;
}
for (int i = fd->ordered - 1; i >= fd->collapse; i--)
{
tree t, type = TREE_TYPE (fd->loops[i].v);
gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
expand_omp_build_assign (&gsi, fd->loops[i].v,
fold_convert (type, fd->loops[i].n1));
if (counts[i])
expand_omp_build_assign (&gsi, counts[i],
build_zero_cst (fd->iter_type));
tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
size_int (i - fd->collapse + 1),
NULL_TREE, NULL_TREE);
expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
if (!gsi_end_p (gsi))
gsi_prev (&gsi);
else
gsi = gsi_last_bb (body_bb);
edge e1 = split_block (body_bb, gsi_stmt (gsi));
basic_block new_body = e1->dest;
if (body_bb == cont_bb)
cont_bb = new_body;
edge e2 = NULL;
basic_block new_header;
if (EDGE_COUNT (cont_bb->preds) > 0)
{
gsi = gsi_last_bb (cont_bb);
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (fd->loops[i].v,
fold_convert (sizetype,
fd->loops[i].step));
else
t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v,
fold_convert (type, fd->loops[i].step));
expand_omp_build_assign (&gsi, fd->loops[i].v, t);
if (counts[i])
{
t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i],
build_int_cst (fd->iter_type, 1));
expand_omp_build_assign (&gsi, counts[i], t);
t = counts[i];
}
else
{
t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
fd->loops[i].v, fd->loops[i].n1);
t = fold_convert (fd->iter_type, t);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
}
aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
size_int (i - fd->collapse + 1),
NULL_TREE, NULL_TREE);
expand_omp_build_assign (&gsi, aref, t);
gsi_prev (&gsi);
e2 = split_block (cont_bb, gsi_stmt (gsi));
new_header = e2->dest;
}
else
new_header = cont_bb;
gsi = gsi_after_labels (new_header);
tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE,
true, GSI_SAME_STMT);
tree n2
= force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2),
true, NULL_TREE, true, GSI_SAME_STMT);
t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2);
gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT);
edge e3 = split_block (new_header, gsi_stmt (gsi));
cont_bb = e3->dest;
remove_edge (e1);
make_edge (body_bb, new_header, EDGE_FALLTHRU);
e3->flags = EDGE_FALSE_VALUE;
e3->probability = profile_probability::guessed_always ().apply_scale (1, 8);
e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE);
e1->probability = e3->probability.invert ();
set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb);
set_immediate_dominator (CDI_DOMINATORS, new_body, new_header);
if (e2)
{
class loop *loop = alloc_loop ();
loop->header = new_header;
loop->latch = e2->src;
add_loop (loop, body_bb->loop_father);
}
}
/* If there are any lastprivate clauses and it is possible some loops
might have zero iterations, ensure all the decls are initialized,
otherwise we could crash evaluating C++ class iterators with lastprivate
clauses. */
bool need_inits = false;
for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++)
if (need_inits)
{
tree type = TREE_TYPE (fd->loops[i].v);
gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
expand_omp_build_assign (&gsi, fd->loops[i].v,
fold_convert (type, fd->loops[i].n1));
}
else
{
tree type = TREE_TYPE (fd->loops[i].v);
tree this_cond = fold_build2 (fd->loops[i].cond_code,
boolean_type_node,
fold_convert (type, fd->loops[i].n1),
fold_convert (type, fd->loops[i].n2));
if (!integer_onep (this_cond))
need_inits = true;
}
return cont_bb;
}
/* A subroutine of expand_omp_for. Generate code for a parallel
loop with any schedule. Given parameters:
for (V = N1; V cond N2; V += STEP) BODY;
where COND is "<" or ">", we generate pseudocode
more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
if (more) goto L0; else goto L3;
L0:
V = istart0;
iend = iend0;
L1:
BODY;
V += STEP;
if (V cond iend) goto L1; else goto L2;
L2:
if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
L3:
If this is a combined omp parallel loop, instead of the call to
GOMP_loop_foo_start, we call GOMP_loop_foo_next.
If this is gimple_omp_for_combined_p loop, then instead of assigning
V and iend in L0 we assign the first two _looptemp_ clause decls of the
inner GIMPLE_OMP_FOR and V += STEP; and
if (V cond iend) goto L1; else goto L2; are removed.
For collapsed loops, given parameters:
collapse(3)
for (V1 = N11; V1 cond1 N12; V1 += STEP1)
for (V2 = N21; V2 cond2 N22; V2 += STEP2)
for (V3 = N31; V3 cond3 N32; V3 += STEP3)
BODY;
we generate pseudocode
if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
if (cond3 is <)
adj = STEP3 - 1;
else
adj = STEP3 + 1;
count3 = (adj + N32 - N31) / STEP3;
if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
if (cond2 is <)
adj = STEP2 - 1;
else
adj = STEP2 + 1;
count2 = (adj + N22 - N21) / STEP2;
if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
if (cond1 is <)
adj = STEP1 - 1;
else
adj = STEP1 + 1;
count1 = (adj + N12 - N11) / STEP1;
count = count1 * count2 * count3;
goto Z1;
Z0:
count = 0;
Z1:
more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
if (more) goto L0; else goto L3;
L0:
V = istart0;
T = V;
V3 = N31 + (T % count3) * STEP3;
T = T / count3;
V2 = N21 + (T % count2) * STEP2;
T = T / count2;
V1 = N11 + T * STEP1;
iend = iend0;
L1:
BODY;
V += 1;
if (V < iend) goto L10; else goto L2;
L10:
V3 += STEP3;
if (V3 cond3 N32) goto L1; else goto L11;
L11:
V3 = N31;
V2 += STEP2;
if (V2 cond2 N22) goto L1; else goto L12;
L12:
V2 = N21;
V1 += STEP1;
goto L1;
L2:
if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
L3:
*/
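/* A concrete instance of the count computation above (illustrative
   addition, not part of the original comment): for
     for (V = 0; V < 10; V += 3)
   cond is <, so adj = STEP - 1 = 2 and
     count = (adj + N2 - N1) / STEP = (2 + 10 - 0) / 3 = 4,
   matching the four iterations V = 0, 3, 6, 9. */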
static void
expand_omp_for_generic (struct omp_region *region,
struct omp_for_data *fd,
enum built_in_function start_fn,
enum built_in_function next_fn,
tree sched_arg,
gimple *inner_stmt)
{
tree type, istart0, iend0, iend;
tree t, vmain, vback, bias = NULL_TREE;
basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
basic_block l2_bb = NULL, l3_bb = NULL;
gimple_stmt_iterator gsi;
gassign *assign_stmt;
bool in_combined_parallel = is_combined_parallel (region);
bool broken_loop = region->cont == NULL;
edge e, ne;
tree *counts = NULL;
int i;
bool ordered_lastprivate = false;
gcc_assert (!broken_loop || !in_combined_parallel);
gcc_assert (fd->iter_type == long_integer_type_node
|| !in_combined_parallel);
entry_bb = region->entry;
cont_bb = region->cont;
collapse_bb = NULL;
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
gcc_assert (broken_loop
|| BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
l1_bb = single_succ (l0_bb);
if (!broken_loop)
{
l2_bb = create_empty_bb (cont_bb);
gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb
|| (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest
== l1_bb));
gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
}
else
l2_bb = NULL;
l3_bb = BRANCH_EDGE (entry_bb)->dest;
exit_bb = region->exit;
gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
if (fd->ordered
&& omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE_LASTPRIVATE))
ordered_lastprivate = true;
tree reductions = NULL_TREE;
tree mem = NULL_TREE, cond_var = NULL_TREE, condtemp = NULL_TREE;
tree memv = NULL_TREE;
if (fd->lastprivate_conditional)
{
tree c = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__CONDTEMP_);
if (fd->have_pointer_condtemp)
condtemp = OMP_CLAUSE_DECL (c);
c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_);
cond_var = OMP_CLAUSE_DECL (c);
}
if (sched_arg)
{
if (fd->have_reductemp)
{
tree c = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__REDUCTEMP_);
reductions = OMP_CLAUSE_DECL (c);
gcc_assert (TREE_CODE (reductions) == SSA_NAME);
gimple *g = SSA_NAME_DEF_STMT (reductions);
reductions = gimple_assign_rhs1 (g);
OMP_CLAUSE_DECL (c) = reductions;
entry_bb = gimple_bb (g);
edge e = split_block (entry_bb, g);
if (region->entry == entry_bb)
region->entry = e->dest;
gsi = gsi_last_bb (entry_bb);
}
else
reductions = null_pointer_node;
if (fd->have_pointer_condtemp)
{
tree type = TREE_TYPE (condtemp);
memv = create_tmp_var (type);
TREE_ADDRESSABLE (memv) = 1;
unsigned HOST_WIDE_INT sz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type)));
sz *= fd->lastprivate_conditional;
expand_omp_build_assign (&gsi, memv, build_int_cst (type, sz),
false);
mem = build_fold_addr_expr (memv);
}
else
mem = null_pointer_node;
}
if (fd->collapse > 1 || fd->ordered)
{
int first_zero_iter1 = -1, first_zero_iter2 = -1;
basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL;
counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse);
expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
zero_iter1_bb, first_zero_iter1,
zero_iter2_bb, first_zero_iter2, l2_dom_bb);
if (zero_iter1_bb)
{
/* Some counts[i] vars might be uninitialized if
some loop has zero iterations. But the body shouldn't
be executed in that case, so just avoid uninit warnings. */
for (i = first_zero_iter1;
i < (fd->ordered ? fd->ordered : fd->collapse); i++)
if (SSA_VAR_P (counts[i]))
TREE_NO_WARNING (counts[i]) = 1;
gsi_prev (&gsi);
e = split_block (entry_bb, gsi_stmt (gsi));
entry_bb = e->dest;
make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU);
gsi = gsi_last_nondebug_bb (entry_bb);
set_immediate_dominator (CDI_DOMINATORS, entry_bb,
get_immediate_dominator (CDI_DOMINATORS,
zero_iter1_bb));
}
if (zero_iter2_bb)
{
/* Some counts[i] vars might be uninitialized if
some loop has zero iterations. But the body shouldn't
be executed in that case, so just avoid uninit warnings. */
for (i = first_zero_iter2; i < fd->ordered; i++)
if (SSA_VAR_P (counts[i]))
TREE_NO_WARNING (counts[i]) = 1;
if (zero_iter1_bb)
make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
else
{
gsi_prev (&gsi);
e = split_block (entry_bb, gsi_stmt (gsi));
entry_bb = e->dest;
make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
gsi = gsi_last_nondebug_bb (entry_bb);
set_immediate_dominator (CDI_DOMINATORS, entry_bb,
get_immediate_dominator
(CDI_DOMINATORS, zero_iter2_bb));
}
}
if (fd->collapse == 1)
{
counts[0] = fd->loop.n2;
fd->loop = fd->loops[0];
}
}
type = TREE_TYPE (fd->loop.v);
istart0 = create_tmp_var (fd->iter_type, ".istart0");
iend0 = create_tmp_var (fd->iter_type, ".iend0");
TREE_ADDRESSABLE (istart0) = 1;
TREE_ADDRESSABLE (iend0) = 1;
/* See if we need to bias by LLONG_MIN. */
if (fd->iter_type == long_long_unsigned_type_node
&& TREE_CODE (type) == INTEGER_TYPE
&& !TYPE_UNSIGNED (type)
&& fd->ordered == 0)
{
tree n1, n2;
if (fd->loop.cond_code == LT_EXPR)
{
n1 = fd->loop.n1;
n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
}
else
{
n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
n2 = fd->loop.n1;
}
if (TREE_CODE (n1) != INTEGER_CST
|| TREE_CODE (n2) != INTEGER_CST
|| ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
}
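/* Illustrative note (assumption): the bias makes a signed iteration
   space usable with the unsigned long long runtime API while keeping
   the ordering. E.g. for long long V with N1 = -5 and N2 = 5 the
   signs differ, so bias = LLONG_MIN is added; -5 + bias and 5 + bias
   then compare as unsigned in the same order that -5 and 5 compare
   as signed. */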
gimple_stmt_iterator gsif = gsi;
gsi_prev (&gsif);
tree arr = NULL_TREE;
if (in_combined_parallel)
{
gcc_assert (fd->ordered == 0);
/* In a combined parallel loop, emit a call to
GOMP_loop_foo_next. */
t = build_call_expr (builtin_decl_explicit (next_fn), 2,
build_fold_addr_expr (istart0),
build_fold_addr_expr (iend0));
}
else
{
tree t0, t1, t2, t3, t4;
/* If this is not a combined parallel loop, emit a call to
GOMP_loop_foo_start in ENTRY_BB. */
t4 = build_fold_addr_expr (iend0);
t3 = build_fold_addr_expr (istart0);
if (fd->ordered)
{
t0 = build_int_cst (unsigned_type_node,
fd->ordered - fd->collapse + 1);
arr = create_tmp_var (build_array_type_nelts (fd->iter_type,
fd->ordered
- fd->collapse + 1),
".omp_counts");
DECL_NAMELESS (arr) = 1;
TREE_ADDRESSABLE (arr) = 1;
TREE_STATIC (arr) = 1;
vec<constructor_elt, va_gc> *v;
vec_alloc (v, fd->ordered - fd->collapse + 1);
int idx;
for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++)
{
tree c;
if (idx == 0 && fd->collapse > 1)
c = fd->loop.n2;
else
c = counts[idx + fd->collapse - 1];
tree purpose = size_int (idx);
CONSTRUCTOR_APPEND_ELT (v, purpose, c);
if (TREE_CODE (c) != INTEGER_CST)
TREE_STATIC (arr) = 0;
}
DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v);
if (!TREE_STATIC (arr))
force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR,
void_type_node, arr),
true, NULL_TREE, true, GSI_SAME_STMT);
t1 = build_fold_addr_expr (arr);
t2 = NULL_TREE;
}
else
{
t2 = fold_convert (fd->iter_type, fd->loop.step);
t1 = fd->loop.n2;
t0 = fd->loop.n1;
if (gimple_omp_for_combined_into_p (fd->for_stmt))
{
tree innerc
= omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
t0 = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
t1 = OMP_CLAUSE_DECL (innerc);
}
if (POINTER_TYPE_P (TREE_TYPE (t0))
&& TYPE_PRECISION (TREE_TYPE (t0))
!= TYPE_PRECISION (fd->iter_type))
{
/* Avoid casting pointers to integer of a different size. */
tree itype = signed_type_for (type);
t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
}
else
{
t1 = fold_convert (fd->iter_type, t1);
t0 = fold_convert (fd->iter_type, t0);
}
if (bias)
{
t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
}
}
if (fd->iter_type == long_integer_type_node || fd->ordered)
{
if (fd->chunk_size)
{
t = fold_convert (fd->iter_type, fd->chunk_size);
t = omp_adjust_chunk_size (t, fd->simd_schedule);
if (sched_arg)
{
if (fd->ordered)
t = build_call_expr (builtin_decl_explicit (start_fn),
8, t0, t1, sched_arg, t, t3, t4,
reductions, mem);
else
t = build_call_expr (builtin_decl_explicit (start_fn),
9, t0, t1, t2, sched_arg, t, t3, t4,
reductions, mem);
}
else if (fd->ordered)
t = build_call_expr (builtin_decl_explicit (start_fn),
5, t0, t1, t, t3, t4);
else
t = build_call_expr (builtin_decl_explicit (start_fn),
6, t0, t1, t2, t, t3, t4);
}
else if (fd->ordered)
t = build_call_expr (builtin_decl_explicit (start_fn),
4, t0, t1, t3, t4);
else
t = build_call_expr (builtin_decl_explicit (start_fn),
5, t0, t1, t2, t3, t4);
}
else
{
tree t5;
tree c_bool_type;
tree bfn_decl;
/* The GOMP_loop_ull_*start functions have additional boolean
argument, true for < loops and false for > loops.
In Fortran, the C bool type can be different from
boolean_type_node. */
bfn_decl = builtin_decl_explicit (start_fn);
c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
t5 = build_int_cst (c_bool_type,
fd->loop.cond_code == LT_EXPR ? 1 : 0);
if (fd->chunk_size)
{
tree bfn_decl = builtin_decl_explicit (start_fn);
t = fold_convert (fd->iter_type, fd->chunk_size);
t = omp_adjust_chunk_size (t, fd->simd_schedule);
if (sched_arg)
t = build_call_expr (bfn_decl, 10, t5, t0, t1, t2, sched_arg,
t, t3, t4, reductions, mem);
else
t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
}
else
t = build_call_expr (builtin_decl_explicit (start_fn),
6, t5, t0, t1, t2, t3, t4);
}
}
if (TREE_TYPE (t) != boolean_type_node)
t = fold_build2 (NE_EXPR, boolean_type_node,
t, build_int_cst (TREE_TYPE (t), 0));
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
if (arr && !TREE_STATIC (arr))
{
tree clobber = build_clobber (TREE_TYPE (arr));
gsi_insert_before (&gsi, gimple_build_assign (arr, clobber),
GSI_SAME_STMT);
}
if (fd->have_pointer_condtemp)
expand_omp_build_assign (&gsi, condtemp, memv, false);
if (fd->have_reductemp)
{
gimple *g = gsi_stmt (gsi);
gsi_remove (&gsi, true);
release_ssa_name (gimple_assign_lhs (g));
entry_bb = region->entry;
gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
}
gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
/* Remove the GIMPLE_OMP_FOR statement. */
gsi_remove (&gsi, true);
if (gsi_end_p (gsif))
gsif = gsi_after_labels (gsi_bb (gsif));
gsi_next (&gsif);
/* Iteration setup for sequential loop goes in L0_BB. */
tree startvar = fd->loop.v;
tree endvar = NULL_TREE;
if (gimple_omp_for_combined_p (fd->for_stmt))
{
gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (inner_stmt)
== GF_OMP_FOR_KIND_SIMD);
tree innerc = omp_find_clause (gimple_omp_for_clauses (inner_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
startvar = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
endvar = OMP_CLAUSE_DECL (innerc);
}
gsi = gsi_start_bb (l0_bb);
t = istart0;
if (fd->ordered && fd->collapse == 1)
t = fold_build2 (MULT_EXPR, fd->iter_type, t,
fold_convert (fd->iter_type, fd->loop.step));
else if (bias)
t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
if (fd->ordered && fd->collapse == 1)
{
if (POINTER_TYPE_P (TREE_TYPE (startvar)))
t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
fd->loop.n1, fold_convert (sizetype, t));
else
{
t = fold_convert (TREE_TYPE (startvar), t);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
fd->loop.n1, t);
}
}
else
{
if (POINTER_TYPE_P (TREE_TYPE (startvar)))
t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
t = fold_convert (TREE_TYPE (startvar), t);
}
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (startvar)
&& TREE_ADDRESSABLE (startvar),
NULL_TREE, false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (startvar, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
/* For lastprivate(conditional:) itervar, we need some iteration
counter that starts at unsigned non-zero and increases.
Prefer as few IVs as possible, so if we can use startvar
itself, use that, or startvar + constant (those would be
incremented with step), and as a last resort use istart0 + 1,
incremented by 1. */
if ((fd->ordered && fd->collapse == 1)
|| bias
|| POINTER_TYPE_P (type)
|| TREE_CODE (fd->loop.n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR)
t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, istart0),
build_int_cst (itype, 1));
else if (tree_int_cst_sgn (fd->loop.n1) == 1)
t = fold_convert (itype, t);
else
{
tree c = fold_convert (itype, fd->loop.n1);
c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c);
t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c);
}
t = force_gimple_operand_gsi (&gsi, t, false,
NULL_TREE, false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (cond_var, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
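/* Illustration (an assumed example): for
     #pragma omp parallel for lastprivate(conditional: x)
     for (i = 0; i < n; i++)
       if (p[i]) x = i;
   the cond_var built above is the monotonically increasing iteration
   counter the lowering compares across threads, so that the store
   from the lexically last iteration assigning x wins. */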
t = iend0;
if (fd->ordered && fd->collapse == 1)
t = fold_build2 (MULT_EXPR, fd->iter_type, t,
fold_convert (fd->iter_type, fd->loop.step));
else if (bias)
t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
if (fd->ordered && fd->collapse == 1)
{
if (POINTER_TYPE_P (TREE_TYPE (startvar)))
t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
fd->loop.n1, fold_convert (sizetype, t));
else
{
t = fold_convert (TREE_TYPE (startvar), t);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
fd->loop.n1, t);
}
}
else
{
if (POINTER_TYPE_P (TREE_TYPE (startvar)))
t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
t = fold_convert (TREE_TYPE (startvar), t);
}
iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
if (endvar)
{
assign_stmt = gimple_build_assign (endvar, iend);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
assign_stmt = gimple_build_assign (fd->loop.v, iend);
else
assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
/* Handle linear clause adjustments. */
tree itercnt = NULL_TREE;
if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
for (tree c = gimple_omp_for_clauses (fd->for_stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
{
tree d = OMP_CLAUSE_DECL (c);
bool is_ref = omp_is_reference (d);
tree t = d, a, dest;
if (is_ref)
t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
tree type = TREE_TYPE (t);
if (POINTER_TYPE_P (type))
type = sizetype;
dest = unshare_expr (t);
tree v = create_tmp_var (TREE_TYPE (t), NULL);
expand_omp_build_assign (&gsif, v, t);
if (itercnt == NULL_TREE)
{
itercnt = startvar;
tree n1 = fd->loop.n1;
if (POINTER_TYPE_P (TREE_TYPE (itercnt)))
{
itercnt
= fold_convert (signed_type_for (TREE_TYPE (itercnt)),
itercnt);
n1 = fold_convert (TREE_TYPE (itercnt), n1);
}
itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt),
itercnt, n1);
itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt),
itercnt, fd->loop.step);
itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
}
a = fold_build2 (MULT_EXPR, type,
fold_convert (type, itercnt),
fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
: POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (dest, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
if (fd->collapse > 1)
expand_omp_for_init_vars (fd, &gsi, counts, NULL, inner_stmt, startvar);
if (fd->ordered)
{
/* Until now, the counts array contained the number of iterations
(or a variable containing it) for the ith loop. From now on, we
need those counts only for the collapsed loops, and only for the
2nd till the last collapsed one. Move those one element earlier;
we'll use counts[fd->collapse - 1] for the first source/sink
iteration counter and so on, and counts[fd->ordered] as the
array holding the current counter values for
depend(source). */
if (fd->collapse > 1)
memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0]));
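/* E.g. (illustrative) with collapse(2) ordered(4): counts[1], the
   count of the second collapsed loop, moves to counts[0]; counts[1],
   counts[2] and counts[3] are then reused for the first through
   third source/sink iteration counters set up below, and counts[4]
   for the current-values array. */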
if (broken_loop)
{
int i;
for (i = fd->collapse; i < fd->ordered; i++)
{
tree type = TREE_TYPE (fd->loops[i].v);
tree this_cond
= fold_build2 (fd->loops[i].cond_code, boolean_type_node,
fold_convert (type, fd->loops[i].n1),
fold_convert (type, fd->loops[i].n2));
if (!integer_onep (this_cond))
break;
}
if (i < fd->ordered)
{
cont_bb
= create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
add_bb_to_loop (cont_bb, l1_bb->loop_father);
gimple_stmt_iterator gsi = gsi_after_labels (cont_bb);
gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
make_edge (cont_bb, l3_bb, EDGE_FALLTHRU);
make_edge (cont_bb, l1_bb, 0);
l2_bb = create_empty_bb (cont_bb);
broken_loop = false;
}
}
expand_omp_ordered_source_sink (region, fd, counts, cont_bb);
cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb,
ordered_lastprivate);
if (counts[fd->collapse - 1])
{
gcc_assert (fd->collapse == 1);
gsi = gsi_last_bb (l0_bb);
expand_omp_build_assign (&gsi, counts[fd->collapse - 1],
istart0, true);
gsi = gsi_last_bb (cont_bb);
t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1],
build_int_cst (fd->iter_type, 1));
expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t);
tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
size_zero_node, NULL_TREE, NULL_TREE);
expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]);
t = counts[fd->collapse - 1];
}
else if (fd->collapse > 1)
t = fd->loop.v;
else
{
t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
fd->loops[0].v, fd->loops[0].n1);
t = fold_convert (fd->iter_type, t);
}
gsi = gsi_last_bb (l0_bb);
tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
size_zero_node, NULL_TREE, NULL_TREE);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
expand_omp_build_assign (&gsi, aref, t, true);
}
if (!broken_loop)
{
/* Code to control the increment and predicate for the sequential
loop goes in the CONT_BB. */
gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
vmain = gimple_omp_continue_control_use (cont_stmt);
vback = gimple_omp_continue_control_def (cont_stmt);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
tree t2;
if ((fd->ordered && fd->collapse == 1)
|| bias
|| POINTER_TYPE_P (type)
|| TREE_CODE (fd->loop.n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR)
t2 = build_int_cst (itype, 1);
else
t2 = fold_convert (itype, fd->loop.step);
t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2);
t2 = force_gimple_operand_gsi (&gsi, t2, false,
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (cond_var, t2);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
}
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, fd->loop.step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (vback)
&& TREE_ADDRESSABLE (vback),
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (vback, t);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE)
{
tree tem;
if (fd->collapse > 1)
tem = fd->loop.v;
else
{
tem = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
fd->loops[0].v, fd->loops[0].n1);
tem = fold_convert (fd->iter_type, tem);
}
tree aref = build4 (ARRAY_REF, fd->iter_type,
counts[fd->ordered], size_zero_node,
NULL_TREE, NULL_TREE);
tem = force_gimple_operand_gsi (&gsi, tem, true, NULL_TREE,
true, GSI_SAME_STMT);
expand_omp_build_assign (&gsi, aref, tem);
}
t = build2 (fd->loop.cond_code, boolean_type_node,
DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
iend);
gcond *cond_stmt = gimple_build_cond_empty (t);
gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
}
/* Remove GIMPLE_OMP_CONTINUE. */
gsi_remove (&gsi, true);
if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
collapse_bb = extract_omp_for_update_vars (fd, NULL, cont_bb, l1_bb);
/* Emit code to get the next parallel iteration in L2_BB. */
gsi = gsi_start_bb (l2_bb);
t = build_call_expr (builtin_decl_explicit (next_fn), 2,
build_fold_addr_expr (istart0),
build_fold_addr_expr (iend0));
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
if (TREE_TYPE (t) != boolean_type_node)
t = fold_build2 (NE_EXPR, boolean_type_node,
t, build_int_cst (TREE_TYPE (t), 0));
gcond *cond_stmt = gimple_build_cond_empty (t);
gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
}
/* Add the loop cleanup function. */
gsi = gsi_last_nondebug_bb (exit_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
else
t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
gcall *call_stmt = gimple_build_call (t, 0);
if (fd->ordered)
{
tree arr = counts[fd->ordered];
tree clobber = build_clobber (TREE_TYPE (arr));
gsi_insert_after (&gsi, gimple_build_assign (arr, clobber),
GSI_SAME_STMT);
}
if (gimple_omp_return_lhs (gsi_stmt (gsi)))
{
gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
if (fd->have_reductemp)
{
gimple *g = gimple_build_assign (reductions, NOP_EXPR,
gimple_call_lhs (call_stmt));
gsi_insert_after (&gsi, g, GSI_SAME_STMT);
}
}
gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
gsi_remove (&gsi, true);
/* Connect the new blocks. */
find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
if (!broken_loop)
{
gimple_seq phis;
e = find_edge (cont_bb, l3_bb);
ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
phis = phi_nodes (l3_bb);
for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple *phi = gsi_stmt (gsi);
SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
PHI_ARG_DEF_FROM_EDGE (phi, e));
}
remove_edge (e);
make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
e = find_edge (cont_bb, l1_bb);
if (e == NULL)
{
e = BRANCH_EDGE (cont_bb);
gcc_assert (single_succ (e->dest) == l1_bb);
}
if (gimple_omp_for_combined_p (fd->for_stmt))
{
remove_edge (e);
e = NULL;
}
else if (fd->collapse > 1)
{
remove_edge (e);
e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
}
else
e->flags = EDGE_TRUE_VALUE;
if (e)
{
e->probability = profile_probability::guessed_always ().apply_scale (7, 8);
find_edge (cont_bb, l2_bb)->probability = e->probability.invert ();
}
else
{
e = find_edge (cont_bb, l2_bb);
e->flags = EDGE_FALLTHRU;
}
make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
if (gimple_in_ssa_p (cfun))
{
/* Add phis to the outer loop that connect to the phis in the inner,
original loop, and move the loop entry value of the inner phi to
the loop entry value of the outer phi. */
gphi_iterator psi;
for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi))
{
location_t locus;
gphi *nphi;
gphi *exit_phi = psi.phi ();
if (virtual_operand_p (gimple_phi_result (exit_phi)))
continue;
edge l2_to_l3 = find_edge (l2_bb, l3_bb);
tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3);
basic_block latch = BRANCH_EDGE (cont_bb)->dest;
edge latch_to_l1 = find_edge (latch, l1_bb);
gphi *inner_phi
= find_phi_with_arg_on_edge (exit_res, latch_to_l1);
tree t = gimple_phi_result (exit_phi);
tree new_res = copy_ssa_name (t, NULL);
nphi = create_phi_node (new_res, l0_bb);
edge l0_to_l1 = find_edge (l0_bb, l1_bb);
t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1);
locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1);
edge entry_to_l0 = find_edge (entry_bb, l0_bb);
add_phi_arg (nphi, t, entry_to_l0, locus);
edge l2_to_l0 = find_edge (l2_bb, l0_bb);
add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION);
add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION);
}
}
set_immediate_dominator (CDI_DOMINATORS, l2_bb,
recompute_dominator (CDI_DOMINATORS, l2_bb));
set_immediate_dominator (CDI_DOMINATORS, l3_bb,
recompute_dominator (CDI_DOMINATORS, l3_bb));
set_immediate_dominator (CDI_DOMINATORS, l0_bb,
recompute_dominator (CDI_DOMINATORS, l0_bb));
set_immediate_dominator (CDI_DOMINATORS, l1_bb,
recompute_dominator (CDI_DOMINATORS, l1_bb));
/* We enter expand_omp_for_generic with a loop. This original loop may
have its own loop struct, or it may be part of an outer loop struct
(which may be the fake loop). */
class loop *outer_loop = entry_bb->loop_father;
bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop;
add_bb_to_loop (l2_bb, outer_loop);
/* We've added a new loop around the original loop. Allocate the
corresponding loop struct. */
class loop *new_loop = alloc_loop ();
new_loop->header = l0_bb;
new_loop->latch = l2_bb;
add_loop (new_loop, outer_loop);
/* Allocate a loop structure for the original loop unless we already
had one. */
if (!orig_loop_has_loop_struct
&& !gimple_omp_for_combined_p (fd->for_stmt))
{
class loop *orig_loop = alloc_loop ();
orig_loop->header = l1_bb;
/* The loop may have multiple latches. */
add_loop (orig_loop, new_loop);
}
}
}
/* Helper function for expand_omp_for_static_nochunk. If PTR is NULL,
compute the needed allocation size. If !ALLOC, this is for the
team allocations; if ALLOC, for the per-thread allocation. SZ is
the initial needed size for
other purposes, ALLOC_ALIGN guaranteed alignment of allocation in bytes,
CNT number of elements of each array, for !ALLOC this is
omp_get_num_threads (), for ALLOC number of iterations handled by the
current thread. If PTR is non-NULL, it is the start of the allocation
and this routine shall assign to OMP_CLAUSE_DECL (c) of those _scantemp_
clauses pointers to the corresponding arrays. */
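/* Worked example (illustrative assumption): two non-control _scantemp_
   clauses with pointee types double (8 bytes) and int (4 bytes),
   SZ == 0 and CNT == the thread count. The sizing pass (PTR == NULL)
   returns (8 + 4) * CNT; the assignment pass stores PTR into the
   first clause's decl and PTR + 8 * CNT into the second, modulo the
   alignment adjustments below. */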
static tree
expand_omp_scantemp_alloc (tree clauses, tree ptr, unsigned HOST_WIDE_INT sz,
unsigned HOST_WIDE_INT alloc_align, tree cnt,
gimple_stmt_iterator *gsi, bool alloc)
{
tree eltsz = NULL_TREE;
unsigned HOST_WIDE_INT preval = 0;
if (ptr && sz)
ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr),
ptr, size_int (sz));
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_
&& !OMP_CLAUSE__SCANTEMP__CONTROL (c)
&& (!OMP_CLAUSE__SCANTEMP__ALLOC (c)) != alloc)
{
tree pointee_type = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c)));
unsigned HOST_WIDE_INT al = TYPE_ALIGN_UNIT (pointee_type);
if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (pointee_type)))
{
unsigned HOST_WIDE_INT szl
= tree_to_uhwi (TYPE_SIZE_UNIT (pointee_type));
szl = least_bit_hwi (szl);
if (szl)
al = MIN (al, szl);
}
if (ptr == NULL_TREE)
{
if (eltsz == NULL_TREE)
eltsz = TYPE_SIZE_UNIT (pointee_type);
else
eltsz = size_binop (PLUS_EXPR, eltsz,
TYPE_SIZE_UNIT (pointee_type));
}
if (preval == 0 && al <= alloc_align)
{
unsigned HOST_WIDE_INT diff = ROUND_UP (sz, al) - sz;
sz += diff;
if (diff && ptr)
ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr),
ptr, size_int (diff));
}
else if (al > preval)
{
if (ptr)
{
ptr = fold_convert (pointer_sized_int_node, ptr);
ptr = fold_build2 (PLUS_EXPR, pointer_sized_int_node, ptr,
build_int_cst (pointer_sized_int_node,
al - 1));
ptr = fold_build2 (BIT_AND_EXPR, pointer_sized_int_node, ptr,
build_int_cst (pointer_sized_int_node,
-(HOST_WIDE_INT) al));
ptr = fold_convert (ptr_type_node, ptr);
}
else
sz += al - 1;
}
if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (pointee_type)))
preval = al;
else
preval = 1;
if (ptr)
{
expand_omp_build_assign (gsi, OMP_CLAUSE_DECL (c), ptr, false);
ptr = OMP_CLAUSE_DECL (c);
ptr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (ptr), ptr,
size_binop (MULT_EXPR, cnt,
TYPE_SIZE_UNIT (pointee_type)));
}
}
if (ptr == NULL_TREE)
{
eltsz = size_binop (MULT_EXPR, eltsz, cnt);
if (sz)
eltsz = size_binop (PLUS_EXPR, eltsz, size_int (sz));
return eltsz;
}
else
return ptr;
}
/* Return the last _looptemp_ clause if one has been created for
lastprivate on distribute parallel for{, simd} or taskloop.
FD is the loop data and INNERC should be the second _looptemp_
clause (the one holding the end of the range).
This is followed by collapse - 1 _looptemp_ clauses for the
counts[1] and up, and for triangular loops followed by 4
further _looptemp_ clauses (one for counts[0], one first_inner_iterations,
one factor and one adjn1). After this there is optionally one
_looptemp_ clause that this function returns. */
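/* E.g. (illustrative): for a rectangular collapse(3) loop, INNERC is
   the 2nd clause and is followed by two clauses for counts[1] and
   counts[2]; the clause this function returns, if present, is then
   the 5th. */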
static tree
find_lastprivate_looptemp (struct omp_for_data *fd, tree innerc)
{
gcc_assert (innerc);
int count = fd->collapse - 1;
if (fd->non_rect
&& fd->last_nonrect == fd->first_nonrect + 1
&& !TYPE_UNSIGNED (TREE_TYPE (fd->loops[fd->last_nonrect].v)))
count += 4;
for (int i = 0; i < count; i++)
{
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
}
return omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
}
/* A subroutine of expand_omp_for. Generate code for a parallel
loop with static schedule and no specified chunk size. Given
parameters:
for (V = N1; V cond N2; V += STEP) BODY;
where COND is "<" or ">", we generate pseudocode
if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
if (cond is <)
adj = STEP - 1;
else
adj = STEP + 1;
if ((__typeof (V)) -1 > 0 && cond is >)
n = -(adj + N2 - N1) / -STEP;
else
n = (adj + N2 - N1) / STEP;
q = n / nthreads;
tt = n % nthreads;
if (threadid < tt) goto L3; else goto L4;
L3:
tt = 0;
q = q + 1;
L4:
s0 = q * threadid + tt;
e0 = s0 + q;
V = s0 * STEP + N1;
if (s0 >= e0) goto L2; else goto L0;
L0:
e = e0 * STEP + N1;
L1:
BODY;
V += STEP;
if (V cond e) goto L1;
L2:
*/
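/* Worked example of the partitioning above (illustrative addition):
   n = 10 iterations on nthreads = 4 give q = 2, tt = 2. Threads 0
   and 1 take q + 1 = 3 iterations each ([0,3) and [3,6)), threads 2
   and 3 take 2 each ([6,8) and [8,10)), covering all 10 iterations
   with an imbalance of at most one iteration. */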
static void
expand_omp_for_static_nochunk (struct omp_region *region,
struct omp_for_data *fd,
gimple *inner_stmt)
{
tree n, q, s0, e0, e, t, tt, nthreads = NULL_TREE, threadid;
tree type, itype, vmain, vback;
basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
basic_block body_bb, cont_bb, collapse_bb = NULL;
basic_block fin_bb, fourth_bb = NULL, fifth_bb = NULL, sixth_bb = NULL;
basic_block exit1_bb = NULL, exit2_bb = NULL, exit3_bb = NULL;
gimple_stmt_iterator gsi, gsip;
edge ep;
bool broken_loop = region->cont == NULL;
tree *counts = NULL;
tree n1, n2, step;
tree reductions = NULL_TREE;
tree cond_var = NULL_TREE, condtemp = NULL_TREE;
itype = type = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (type))
itype = signed_type_for (type);
entry_bb = region->entry;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
fin_bb = BRANCH_EDGE (entry_bb)->dest;
gcc_assert (broken_loop
|| (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
body_bb = single_succ (seq_start_bb);
if (!broken_loop)
{
gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
|| single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
}
exit_bb = region->exit;
/* Iteration space partitioning goes in ENTRY_BB. */
gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
gsip = gsi;
gsi_prev (&gsip);
if (fd->collapse > 1)
{
int first_zero_iter = -1, dummy = -1;
basic_block l2_dom_bb = NULL, dummy_bb = NULL;
counts = XALLOCAVEC (tree, fd->collapse);
expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
fin_bb, first_zero_iter,
dummy_bb, dummy, l2_dom_bb);
t = NULL_TREE;
}
else if (gimple_omp_for_combined_into_p (fd->for_stmt))
t = integer_one_node;
else
t = fold_binary (fd->loop.cond_code, boolean_type_node,
fold_convert (type, fd->loop.n1),
fold_convert (type, fd->loop.n2));
if (fd->collapse == 1
&& TYPE_UNSIGNED (type)
&& (t == NULL_TREE || !integer_onep (t)))
{
n1 = fold_convert (type, unshare_expr (fd->loop.n1));
n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
n2 = fold_convert (type, unshare_expr (fd->loop.n2));
n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
true, GSI_SAME_STMT);
gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
expand_omp_regimplify_p, NULL, NULL)
|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
expand_omp_regimplify_p, NULL, NULL))
{
gsi = gsi_for_stmt (cond_stmt);
gimple_regimplify_operands (cond_stmt, &gsi);
}
ep = split_block (entry_bb, cond_stmt);
ep->flags = EDGE_TRUE_VALUE;
entry_bb = ep->dest;
ep->probability = profile_probability::very_likely ();
ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
ep->probability = profile_probability::very_unlikely ();
if (gimple_in_ssa_p (cfun))
{
int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
for (gphi_iterator gpi = gsi_start_phis (fin_bb);
!gsi_end_p (gpi); gsi_next (&gpi))
{
gphi *phi = gpi.phi ();
add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
ep, UNKNOWN_LOCATION);
}
}
gsi = gsi_last_bb (entry_bb);
}
if (fd->lastprivate_conditional)
{
tree clauses = gimple_omp_for_clauses (fd->for_stmt);
tree c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_);
if (fd->have_pointer_condtemp)
condtemp = OMP_CLAUSE_DECL (c);
c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_);
cond_var = OMP_CLAUSE_DECL (c);
}
if (fd->have_reductemp
/* For scan, we don't want to reinitialize condtemp before the
second loop. */
|| (fd->have_pointer_condtemp && !fd->have_scantemp)
|| fd->have_nonctrl_scantemp)
{
tree t1 = build_int_cst (long_integer_type_node, 0);
tree t2 = build_int_cst (long_integer_type_node, 1);
tree t3 = build_int_cstu (long_integer_type_node,
(HOST_WIDE_INT_1U << 31) + 1);
tree clauses = gimple_omp_for_clauses (fd->for_stmt);
gimple_stmt_iterator gsi2 = gsi_none ();
gimple *g = NULL;
tree mem = null_pointer_node, memv = NULL_TREE;
unsigned HOST_WIDE_INT condtemp_sz = 0;
unsigned HOST_WIDE_INT alloc_align = 0;
if (fd->have_reductemp)
{
gcc_assert (!fd->have_nonctrl_scantemp);
tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
reductions = OMP_CLAUSE_DECL (c);
gcc_assert (TREE_CODE (reductions) == SSA_NAME);
g = SSA_NAME_DEF_STMT (reductions);
reductions = gimple_assign_rhs1 (g);
OMP_CLAUSE_DECL (c) = reductions;
gsi2 = gsi_for_stmt (g);
}
else
{
if (gsi_end_p (gsip))
gsi2 = gsi_after_labels (region->entry);
else
gsi2 = gsip;
reductions = null_pointer_node;
}
if (fd->have_pointer_condtemp || fd->have_nonctrl_scantemp)
{
tree type;
if (fd->have_pointer_condtemp)
type = TREE_TYPE (condtemp);
else
type = ptr_type_node;
memv = create_tmp_var (type);
TREE_ADDRESSABLE (memv) = 1;
unsigned HOST_WIDE_INT sz = 0;
tree size = NULL_TREE;
if (fd->have_pointer_condtemp)
{
sz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type)));
sz *= fd->lastprivate_conditional;
condtemp_sz = sz;
}
if (fd->have_nonctrl_scantemp)
{
nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
gimple *g = gimple_build_call (nthreads, 0);
nthreads = create_tmp_var (integer_type_node);
gimple_call_set_lhs (g, nthreads);
gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
nthreads = fold_convert (sizetype, nthreads);
alloc_align = TYPE_ALIGN_UNIT (long_long_integer_type_node);
size = expand_omp_scantemp_alloc (clauses, NULL_TREE, sz,
alloc_align, nthreads, NULL,
false);
size = fold_convert (type, size);
}
else
size = build_int_cst (type, sz);
expand_omp_build_assign (&gsi2, memv, size, false);
mem = build_fold_addr_expr (memv);
}
tree t
= build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_LOOP_START),
9, t1, t2, t2, t3, t1, null_pointer_node,
null_pointer_node, reductions, mem);
force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
if (fd->have_pointer_condtemp)
expand_omp_build_assign (&gsi2, condtemp, memv, false);
if (fd->have_nonctrl_scantemp)
{
tree ptr = fd->have_pointer_condtemp ? condtemp : memv;
expand_omp_scantemp_alloc (clauses, ptr, condtemp_sz,
alloc_align, nthreads, &gsi2, false);
}
if (fd->have_reductemp)
{
gsi_remove (&gsi2, true);
release_ssa_name (gimple_assign_lhs (g));
}
}
switch (gimple_omp_for_kind (fd->for_stmt))
{
case GF_OMP_FOR_KIND_FOR:
nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
break;
case GF_OMP_FOR_KIND_DISTRIBUTE:
nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
break;
default:
gcc_unreachable ();
}
nthreads = build_call_expr (nthreads, 0);
nthreads = fold_convert (itype, nthreads);
nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
true, GSI_SAME_STMT);
threadid = build_call_expr (threadid, 0);
threadid = fold_convert (itype, threadid);
threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
true, GSI_SAME_STMT);
n1 = fd->loop.n1;
n2 = fd->loop.n2;
step = fd->loop.step;
if (gimple_omp_for_combined_into_p (fd->for_stmt))
{
tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n1 = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n2 = OMP_CLAUSE_DECL (innerc);
}
n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
true, NULL_TREE, true, GSI_SAME_STMT);
n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
true, NULL_TREE, true, GSI_SAME_STMT);
step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
true, NULL_TREE, true, GSI_SAME_STMT);
t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype, step, t);
t = fold_build2 (PLUS_EXPR, itype, t, n2);
t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, step));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (itype, t);
n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
q = create_tmp_reg (itype, "q");
t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
tt = create_tmp_reg (itype, "tt");
t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
gcond *cond_stmt = gimple_build_cond_empty (t);
gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
second_bb = split_block (entry_bb, cond_stmt)->dest;
gsi = gsi_last_nondebug_bb (second_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
GSI_SAME_STMT);
gassign *assign_stmt
= gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1));
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
third_bb = split_block (second_bb, assign_stmt)->dest;
gsi = gsi_last_nondebug_bb (third_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
if (fd->have_nonctrl_scantemp)
{
tree clauses = gimple_omp_for_clauses (fd->for_stmt);
tree controlp = NULL_TREE, controlb = NULL_TREE;
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_
&& OMP_CLAUSE__SCANTEMP__CONTROL (c))
{
if (TREE_TYPE (OMP_CLAUSE_DECL (c)) == boolean_type_node)
controlb = OMP_CLAUSE_DECL (c);
else
controlp = OMP_CLAUSE_DECL (c);
if (controlb && controlp)
break;
}
gcc_assert (controlp && controlb);
tree cnt = create_tmp_var (sizetype);
gimple *g = gimple_build_assign (cnt, NOP_EXPR, q);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
unsigned HOST_WIDE_INT alloc_align = TYPE_ALIGN_UNIT (ptr_type_node);
tree sz = expand_omp_scantemp_alloc (clauses, NULL_TREE, 0,
alloc_align, cnt, NULL, true);
tree size = create_tmp_var (sizetype);
expand_omp_build_assign (&gsi, size, sz, false);
tree cmp = fold_build2 (GT_EXPR, boolean_type_node,
size, size_int (16384));
expand_omp_build_assign (&gsi, controlb, cmp);
g = gimple_build_cond (NE_EXPR, controlb, boolean_false_node,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
fourth_bb = split_block (third_bb, g)->dest;
gsi = gsi_last_nondebug_bb (fourth_bb);
/* FIXME: Once we have allocators, this should use allocator. */
g = gimple_build_call (builtin_decl_explicit (BUILT_IN_MALLOC), 1, size);
gimple_call_set_lhs (g, controlp);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
expand_omp_scantemp_alloc (clauses, controlp, 0, alloc_align, cnt,
&gsi, true);
gsi_prev (&gsi);
g = gsi_stmt (gsi);
fifth_bb = split_block (fourth_bb, g)->dest;
gsi = gsi_last_nondebug_bb (fifth_bb);
g = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0);
gimple_call_set_lhs (g, controlp);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
tree alloca_decl = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_
&& OMP_CLAUSE__SCANTEMP__ALLOC (c))
{
tree tmp = create_tmp_var (sizetype);
tree pointee_type = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c)));
g = gimple_build_assign (tmp, MULT_EXPR, cnt,
TYPE_SIZE_UNIT (pointee_type));
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
g = gimple_build_call (alloca_decl, 2, tmp,
size_int (TYPE_ALIGN (pointee_type)));
gimple_call_set_lhs (g, OMP_CLAUSE_DECL (c));
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
sixth_bb = split_block (fifth_bb, g)->dest;
gsi = gsi_last_nondebug_bb (sixth_bb);
}
t = build2 (MULT_EXPR, itype, q, threadid);
t = build2 (PLUS_EXPR, itype, t, tt);
s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
t = fold_build2 (PLUS_EXPR, itype, s0, q);
e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
t = build2 (GE_EXPR, boolean_type_node, s0, e0);
gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
/* Remove the GIMPLE_OMP_FOR statement. */
gsi_remove (&gsi, true);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
gsi = gsi_start_bb (seq_start_bb);
tree startvar = fd->loop.v;
tree endvar = NULL_TREE;
if (gimple_omp_for_combined_p (fd->for_stmt))
{
tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
? gimple_omp_parallel_clauses (inner_stmt)
: gimple_omp_for_clauses (inner_stmt);
tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
startvar = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
endvar = OMP_CLAUSE_DECL (innerc);
if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
&& gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
{
innerc = find_lastprivate_looptemp (fd, innerc);
if (innerc)
{
/* If needed (distribute parallel for with lastprivate),
propagate down the total number of iterations. */
tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
fd->loop.n2);
t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
}
}
t = fold_convert (itype, s0);
t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
{
t = fold_build_pointer_plus (n1, t);
if (!POINTER_TYPE_P (TREE_TYPE (startvar))
&& TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type))
t = fold_convert (signed_type_for (type), t);
}
else
t = fold_build2 (PLUS_EXPR, type, t, n1);
t = fold_convert (TREE_TYPE (startvar), t);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (startvar)
&& TREE_ADDRESSABLE (startvar),
NULL_TREE, false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (startvar, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
/* For lastprivate(conditional:) itervar, we need some iteration
counter that starts at unsigned non-zero and increases.
Prefer as few IVs as possible, so if we can use startvar
itself, use that, or startvar + constant (those would be
incremented with step), and as a last resort use s0 + 1,
incremented by 1. */
if (POINTER_TYPE_P (type)
|| TREE_CODE (n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR)
t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, s0),
build_int_cst (itype, 1));
else if (tree_int_cst_sgn (n1) == 1)
t = fold_convert (itype, t);
else
{
tree c = fold_convert (itype, n1);
c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c);
t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c);
}
t = force_gimple_operand_gsi (&gsi, t, false,
NULL_TREE, false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (cond_var, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
t = fold_convert (itype, e0);
t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
{
t = fold_build_pointer_plus (n1, t);
if (!POINTER_TYPE_P (TREE_TYPE (startvar))
&& TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type))
t = fold_convert (signed_type_for (type), t);
}
else
t = fold_build2 (PLUS_EXPR, type, t, n1);
t = fold_convert (TREE_TYPE (startvar), t);
e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
if (endvar)
{
assign_stmt = gimple_build_assign (endvar, e);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
assign_stmt = gimple_build_assign (fd->loop.v, e);
else
assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
/* Handle linear clause adjustments. */
tree itercnt = NULL_TREE;
tree *nonrect_bounds = NULL;
if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
for (tree c = gimple_omp_for_clauses (fd->for_stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
{
tree d = OMP_CLAUSE_DECL (c);
bool is_ref = omp_is_reference (d);
tree t = d, a, dest;
if (is_ref)
t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
if (itercnt == NULL_TREE)
{
if (gimple_omp_for_combined_into_p (fd->for_stmt))
{
itercnt = fold_build2 (MINUS_EXPR, itype,
fold_convert (itype, n1),
fold_convert (itype, fd->loop.n1));
itercnt = fold_build2 (EXACT_DIV_EXPR, itype, itercnt, step);
itercnt = fold_build2 (PLUS_EXPR, itype, itercnt, s0);
itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
}
else
itercnt = s0;
}
tree type = TREE_TYPE (t);
if (POINTER_TYPE_P (type))
type = sizetype;
a = fold_build2 (MULT_EXPR, type,
fold_convert (type, itercnt),
fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
dest = unshare_expr (t);
t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
: POINTER_PLUS_EXPR, TREE_TYPE (t), t, a);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (dest, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
if (fd->collapse > 1)
{
if (fd->non_rect)
{
nonrect_bounds = XALLOCAVEC (tree, fd->last_nonrect + 1);
memset (nonrect_bounds, 0, sizeof (tree) * (fd->last_nonrect + 1));
}
expand_omp_for_init_vars (fd, &gsi, counts, nonrect_bounds, inner_stmt,
startvar);
}
if (!broken_loop)
{
/* The code controlling the sequential loop replaces the
GIMPLE_OMP_CONTINUE. */
gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
vmain = gimple_omp_continue_control_use (cont_stmt);
vback = gimple_omp_continue_control_def (cont_stmt);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
tree t2;
if (POINTER_TYPE_P (type)
|| TREE_CODE (n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR)
t2 = build_int_cst (itype, 1);
else
t2 = fold_convert (itype, step);
t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2);
t2 = force_gimple_operand_gsi (&gsi, t2, false,
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (cond_var, t2);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
}
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, step);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (vback)
&& TREE_ADDRESSABLE (vback),
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (vback, t);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
t = build2 (fd->loop.cond_code, boolean_type_node,
DECL_P (vback) && TREE_ADDRESSABLE (vback)
? t : vback, e);
gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
}
/* Remove the GIMPLE_OMP_CONTINUE statement. */
gsi_remove (&gsi, true);
if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
collapse_bb = extract_omp_for_update_vars (fd, nonrect_bounds,
cont_bb, body_bb);
}
/* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
gsi = gsi_last_nondebug_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
{
t = gimple_omp_return_lhs (gsi_stmt (gsi));
if (fd->have_reductemp
|| ((fd->have_pointer_condtemp || fd->have_scantemp)
&& !fd->have_nonctrl_scantemp))
{
tree fn;
if (t)
fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
else
fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
gcall *g = gimple_build_call (fn, 0);
if (t)
{
gimple_call_set_lhs (g, t);
if (fd->have_reductemp)
gsi_insert_after (&gsi, gimple_build_assign (reductions,
NOP_EXPR, t),
GSI_SAME_STMT);
}
gsi_insert_after (&gsi, g, GSI_SAME_STMT);
}
else
gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT);
}
else if ((fd->have_pointer_condtemp || fd->have_scantemp)
&& !fd->have_nonctrl_scantemp)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
gcall *g = gimple_build_call (fn, 0);
gsi_insert_after (&gsi, g, GSI_SAME_STMT);
}
if (fd->have_scantemp && !fd->have_nonctrl_scantemp)
{
tree clauses = gimple_omp_for_clauses (fd->for_stmt);
tree controlp = NULL_TREE, controlb = NULL_TREE;
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__SCANTEMP_
&& OMP_CLAUSE__SCANTEMP__CONTROL (c))
{
if (TREE_TYPE (OMP_CLAUSE_DECL (c)) == boolean_type_node)
controlb = OMP_CLAUSE_DECL (c);
else
controlp = OMP_CLAUSE_DECL (c);
if (controlb && controlp)
break;
}
gcc_assert (controlp && controlb);
gimple *g = gimple_build_cond (NE_EXPR, controlb, boolean_false_node,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
exit1_bb = split_block (exit_bb, g)->dest;
gsi = gsi_after_labels (exit1_bb);
g = gimple_build_call (builtin_decl_explicit (BUILT_IN_FREE), 1,
controlp);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
exit2_bb = split_block (exit1_bb, g)->dest;
gsi = gsi_after_labels (exit2_bb);
g = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE), 1,
controlp);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
exit3_bb = split_block (exit2_bb, g)->dest;
gsi = gsi_after_labels (exit3_bb);
}
gsi_remove (&gsi, true);
/* Connect all the blocks. */
ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
ep->probability = profile_probability::guessed_always ().apply_scale (3, 4);
ep = find_edge (entry_bb, second_bb);
ep->flags = EDGE_TRUE_VALUE;
ep->probability = profile_probability::guessed_always ().apply_scale (1, 4);
if (fourth_bb)
{
ep = make_edge (third_bb, fifth_bb, EDGE_FALSE_VALUE);
ep->probability
= profile_probability::guessed_always ().apply_scale (1, 2);
ep = find_edge (third_bb, fourth_bb);
ep->flags = EDGE_TRUE_VALUE;
ep->probability
= profile_probability::guessed_always ().apply_scale (1, 2);
ep = find_edge (fourth_bb, fifth_bb);
redirect_edge_and_branch (ep, sixth_bb);
}
else
sixth_bb = third_bb;
find_edge (sixth_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
find_edge (sixth_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
if (exit1_bb)
{
ep = make_edge (exit_bb, exit2_bb, EDGE_FALSE_VALUE);
ep->probability
= profile_probability::guessed_always ().apply_scale (1, 2);
ep = find_edge (exit_bb, exit1_bb);
ep->flags = EDGE_TRUE_VALUE;
ep->probability
= profile_probability::guessed_always ().apply_scale (1, 2);
ep = find_edge (exit1_bb, exit2_bb);
redirect_edge_and_branch (ep, exit3_bb);
}
if (!broken_loop)
{
ep = find_edge (cont_bb, body_bb);
if (ep == NULL)
{
ep = BRANCH_EDGE (cont_bb);
gcc_assert (single_succ (ep->dest) == body_bb);
}
if (gimple_omp_for_combined_p (fd->for_stmt))
{
remove_edge (ep);
ep = NULL;
}
else if (fd->collapse > 1)
{
remove_edge (ep);
ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
}
else
ep->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, fin_bb)->flags
= ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
}
set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
if (fourth_bb)
{
set_immediate_dominator (CDI_DOMINATORS, fifth_bb, third_bb);
set_immediate_dominator (CDI_DOMINATORS, sixth_bb, third_bb);
}
set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, sixth_bb);
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
set_immediate_dominator (CDI_DOMINATORS, fin_bb,
recompute_dominator (CDI_DOMINATORS, fin_bb));
if (exit1_bb)
{
set_immediate_dominator (CDI_DOMINATORS, exit2_bb, exit_bb);
set_immediate_dominator (CDI_DOMINATORS, exit3_bb, exit_bb);
}
class loop *loop = body_bb->loop_father;
if (loop != entry_bb->loop_father)
{
gcc_assert (broken_loop || loop->header == body_bb);
gcc_assert (broken_loop
|| loop->latch == region->cont
|| single_pred (loop->latch) == region->cont);
return;
}
if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
{
loop = alloc_loop ();
loop->header = body_bb;
if (collapse_bb == NULL)
loop->latch = cont_bb;
add_loop (loop, body_bb->loop_father);
}
}
/* Return phi in E->DEST with ARG on edge E. */
static gphi *
find_phi_with_arg_on_edge (tree arg, edge e)
{
basic_block bb = e->dest;
for (gphi_iterator gpi = gsi_start_phis (bb);
!gsi_end_p (gpi);
gsi_next (&gpi))
{
gphi *phi = gpi.phi ();
if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg)
return phi;
}
return NULL;
}
/* A subroutine of expand_omp_for. Generate code for a parallel
loop with static schedule and a specified chunk size. Given
parameters:
for (V = N1; V cond N2; V += STEP) BODY;
where COND is "<" or ">", we generate pseudocode
if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
if (cond is <)
adj = STEP - 1;
else
adj = STEP + 1;
if ((__typeof (V)) -1 > 0 && cond is >)
n = -(adj + N2 - N1) / -STEP;
else
n = (adj + N2 - N1) / STEP;
trip = 0;
V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
here so that V is defined
if the loop is not entered
L0:
s0 = (trip * nthreads + threadid) * CHUNK;
e0 = min (s0 + CHUNK, n);
if (s0 < n) goto L1; else goto L4;
L1:
V = s0 * STEP + N1;
e = e0 * STEP + N1;
L2:
BODY;
V += STEP;
if (V cond e) goto L2; else goto L3;
L3:
trip += 1;
goto L0;
L4:
*/
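/* As a concrete instance of the pseudocode above: with nthreads = 4,
   CHUNK = 2 and n = 13 iterations,
     trip 0: thread 0 runs [0, 2), thread 1 [2, 4), thread 2 [4, 6),
	     thread 3 [6, 8);
     trip 1: thread 0 runs [8, 10), thread 1 [10, 12),
	     thread 2 [12, 13), and thread 3 computes s0 = 14 >= n
	     and branches straight to L4.  */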
static void
expand_omp_for_static_chunk (struct omp_region *region,
struct omp_for_data *fd, gimple *inner_stmt)
{
tree n, s0, e0, e, t;
tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
tree type, itype, vmain, vback, vextra;
basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
gimple_stmt_iterator gsi, gsip;
edge se;
bool broken_loop = region->cont == NULL;
tree *counts = NULL;
tree n1, n2, step;
tree reductions = NULL_TREE;
tree cond_var = NULL_TREE, condtemp = NULL_TREE;
itype = type = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (type))
itype = signed_type_for (type);
entry_bb = region->entry;
se = split_block (entry_bb, last_stmt (entry_bb));
entry_bb = se->src;
iter_part_bb = se->dest;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
gcc_assert (broken_loop
|| fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
body_bb = single_succ (seq_start_bb);
if (!broken_loop)
{
gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
|| single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
}
exit_bb = region->exit;
/* Trip and adjustment setup goes in ENTRY_BB. */
gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
gsip = gsi;
gsi_prev (&gsip);
if (fd->collapse > 1)
{
int first_zero_iter = -1, dummy = -1;
basic_block l2_dom_bb = NULL, dummy_bb = NULL;
counts = XALLOCAVEC (tree, fd->collapse);
expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
fin_bb, first_zero_iter,
dummy_bb, dummy, l2_dom_bb);
t = NULL_TREE;
}
else if (gimple_omp_for_combined_into_p (fd->for_stmt))
t = integer_one_node;
else
t = fold_binary (fd->loop.cond_code, boolean_type_node,
fold_convert (type, fd->loop.n1),
fold_convert (type, fd->loop.n2));
if (fd->collapse == 1
&& TYPE_UNSIGNED (type)
&& (t == NULL_TREE || !integer_onep (t)))
{
n1 = fold_convert (type, unshare_expr (fd->loop.n1));
n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
n2 = fold_convert (type, unshare_expr (fd->loop.n2));
n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
true, GSI_SAME_STMT);
gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
NULL_TREE, NULL_TREE);
gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
expand_omp_regimplify_p, NULL, NULL)
|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
expand_omp_regimplify_p, NULL, NULL))
{
gsi = gsi_for_stmt (cond_stmt);
gimple_regimplify_operands (cond_stmt, &gsi);
}
se = split_block (entry_bb, cond_stmt);
se->flags = EDGE_TRUE_VALUE;
entry_bb = se->dest;
se->probability = profile_probability::very_likely ();
se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
se->probability = profile_probability::very_unlikely ();
if (gimple_in_ssa_p (cfun))
{
int dest_idx = find_edge (iter_part_bb, fin_bb)->dest_idx;
for (gphi_iterator gpi = gsi_start_phis (fin_bb);
!gsi_end_p (gpi); gsi_next (&gpi))
{
gphi *phi = gpi.phi ();
add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
se, UNKNOWN_LOCATION);
}
}
gsi = gsi_last_bb (entry_bb);
}
if (fd->lastprivate_conditional)
{
tree clauses = gimple_omp_for_clauses (fd->for_stmt);
tree c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_);
if (fd->have_pointer_condtemp)
condtemp = OMP_CLAUSE_DECL (c);
c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE__CONDTEMP_);
cond_var = OMP_CLAUSE_DECL (c);
}
if (fd->have_reductemp || fd->have_pointer_condtemp)
{
tree t1 = build_int_cst (long_integer_type_node, 0);
tree t2 = build_int_cst (long_integer_type_node, 1);
tree t3 = build_int_cstu (long_integer_type_node,
(HOST_WIDE_INT_1U << 31) + 1);
tree clauses = gimple_omp_for_clauses (fd->for_stmt);
gimple_stmt_iterator gsi2 = gsi_none ();
gimple *g = NULL;
tree mem = null_pointer_node, memv = NULL_TREE;
if (fd->have_reductemp)
{
tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
reductions = OMP_CLAUSE_DECL (c);
gcc_assert (TREE_CODE (reductions) == SSA_NAME);
g = SSA_NAME_DEF_STMT (reductions);
reductions = gimple_assign_rhs1 (g);
OMP_CLAUSE_DECL (c) = reductions;
gsi2 = gsi_for_stmt (g);
}
else
{
if (gsi_end_p (gsip))
gsi2 = gsi_after_labels (region->entry);
else
gsi2 = gsip;
reductions = null_pointer_node;
}
if (fd->have_pointer_condtemp)
{
tree type = TREE_TYPE (condtemp);
memv = create_tmp_var (type);
TREE_ADDRESSABLE (memv) = 1;
unsigned HOST_WIDE_INT sz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type)));
sz *= fd->lastprivate_conditional;
expand_omp_build_assign (&gsi2, memv, build_int_cst (type, sz),
false);
mem = build_fold_addr_expr (memv);
}
tree t
= build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_LOOP_START),
9, t1, t2, t2, t3, t1, null_pointer_node,
null_pointer_node, reductions, mem);
force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
true, GSI_SAME_STMT);
if (fd->have_pointer_condtemp)
expand_omp_build_assign (&gsi2, condtemp, memv, false);
if (fd->have_reductemp)
{
gsi_remove (&gsi2, true);
release_ssa_name (gimple_assign_lhs (g));
}
}
switch (gimple_omp_for_kind (fd->for_stmt))
{
case GF_OMP_FOR_KIND_FOR:
nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
break;
case GF_OMP_FOR_KIND_DISTRIBUTE:
nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
break;
default:
gcc_unreachable ();
}
nthreads = build_call_expr (nthreads, 0);
nthreads = fold_convert (itype, nthreads);
nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
true, GSI_SAME_STMT);
threadid = build_call_expr (threadid, 0);
threadid = fold_convert (itype, threadid);
threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
true, GSI_SAME_STMT);
n1 = fd->loop.n1;
n2 = fd->loop.n2;
step = fd->loop.step;
if (gimple_omp_for_combined_into_p (fd->for_stmt))
{
tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n1 = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n2 = OMP_CLAUSE_DECL (innerc);
}
n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
true, NULL_TREE, true, GSI_SAME_STMT);
n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
true, NULL_TREE, true, GSI_SAME_STMT);
step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
true, NULL_TREE, true, GSI_SAME_STMT);
tree chunk_size = fold_convert (itype, fd->chunk_size);
chunk_size = omp_adjust_chunk_size (chunk_size, fd->simd_schedule);
chunk_size
= force_gimple_operand_gsi (&gsi, chunk_size, true, NULL_TREE, true,
GSI_SAME_STMT);
t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype, step, t);
t = fold_build2 (PLUS_EXPR, itype, t, n2);
t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, step));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (itype, t);
n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
trip_var = create_tmp_reg (itype, ".trip");
if (gimple_in_ssa_p (cfun))
{
trip_init = make_ssa_name (trip_var);
trip_main = make_ssa_name (trip_var);
trip_back = make_ssa_name (trip_var);
}
else
{
trip_init = trip_var;
trip_main = trip_var;
trip_back = trip_var;
}
gassign *assign_stmt
= gimple_build_assign (trip_init, build_int_cst (itype, 0));
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
t = fold_build2 (MULT_EXPR, itype, threadid, chunk_size);
t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (n1, t);
else
t = fold_build2 (PLUS_EXPR, type, t, n1);
vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
/* Remove the GIMPLE_OMP_FOR. */
gsi_remove (&gsi, true);
gimple_stmt_iterator gsif = gsi;
/* Iteration space partitioning goes in ITER_PART_BB. */
gsi = gsi_last_bb (iter_part_bb);
t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
t = fold_build2 (PLUS_EXPR, itype, t, threadid);
t = fold_build2 (MULT_EXPR, itype, t, chunk_size);
s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t = fold_build2 (PLUS_EXPR, itype, s0, chunk_size);
t = fold_build2 (MIN_EXPR, itype, t, n);
e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t = build2 (LT_EXPR, boolean_type_node, s0, n);
gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
/* Setup code for sequential iteration goes in SEQ_START_BB. */
gsi = gsi_start_bb (seq_start_bb);
tree startvar = fd->loop.v;
tree endvar = NULL_TREE;
if (gimple_omp_for_combined_p (fd->for_stmt))
{
tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
? gimple_omp_parallel_clauses (inner_stmt)
: gimple_omp_for_clauses (inner_stmt);
tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
startvar = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
endvar = OMP_CLAUSE_DECL (innerc);
if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
&& gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
{
innerc = find_lastprivate_looptemp (fd, innerc);
if (innerc)
{
/* If needed (distribute parallel for with lastprivate),
propagate down the total number of iterations. */
tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
fd->loop.n2);
t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
}
}
t = fold_convert (itype, s0);
t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
{
t = fold_build_pointer_plus (n1, t);
if (!POINTER_TYPE_P (TREE_TYPE (startvar))
&& TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type))
t = fold_convert (signed_type_for (type), t);
}
else
t = fold_build2 (PLUS_EXPR, type, t, n1);
t = fold_convert (TREE_TYPE (startvar), t);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (startvar)
&& TREE_ADDRESSABLE (startvar),
NULL_TREE, false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (startvar, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
/* For lastprivate(conditional:) itervar, we need some iteration
counter that starts at a non-zero unsigned value and increases.
Prefer as few IVs as possible, so if we can use startvar
itself, use that, or startvar + constant (those would be
incremented with step), and as a last resort use s0 + 1,
itself incremented by 1.  */
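/* E.g. for "for (i = 1; i < N; i++)" (constant positive N1, LT)
   the start value itself is already a non-zero increasing counter
   and is reused directly; for "for (i = -3; i < N; i++)" it is
   shifted by 1 - N1 = 4 below so that the counter starts at 1.
   (Illustrative instances of the three cases handled below.)  */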
if (POINTER_TYPE_P (type)
|| TREE_CODE (n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR)
t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, s0),
build_int_cst (itype, 1));
else if (tree_int_cst_sgn (n1) == 1)
t = fold_convert (itype, t);
else
{
tree c = fold_convert (itype, n1);
c = fold_build2 (MINUS_EXPR, itype, build_int_cst (itype, 1), c);
t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, t), c);
}
t = force_gimple_operand_gsi (&gsi, t, false,
NULL_TREE, false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (cond_var, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
t = fold_convert (itype, e0);
t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
{
t = fold_build_pointer_plus (n1, t);
if (!POINTER_TYPE_P (TREE_TYPE (startvar))
&& TYPE_PRECISION (TREE_TYPE (startvar)) > TYPE_PRECISION (type))
t = fold_convert (signed_type_for (type), t);
}
else
t = fold_build2 (PLUS_EXPR, type, t, n1);
t = fold_convert (TREE_TYPE (startvar), t);
e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
if (endvar)
{
assign_stmt = gimple_build_assign (endvar, e);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
assign_stmt = gimple_build_assign (fd->loop.v, e);
else
assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
/* Handle linear clause adjustments. */
tree itercnt = NULL_TREE, itercntbias = NULL_TREE;
if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
for (tree c = gimple_omp_for_clauses (fd->for_stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
{
tree d = OMP_CLAUSE_DECL (c);
bool is_ref = omp_is_reference (d);
tree t = d, a, dest;
if (is_ref)
t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
tree type = TREE_TYPE (t);
if (POINTER_TYPE_P (type))
type = sizetype;
dest = unshare_expr (t);
tree v = create_tmp_var (TREE_TYPE (t), NULL);
expand_omp_build_assign (&gsif, v, t);
if (itercnt == NULL_TREE)
{
if (gimple_omp_for_combined_into_p (fd->for_stmt))
{
itercntbias
= fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1),
fold_convert (itype, fd->loop.n1));
itercntbias = fold_build2 (EXACT_DIV_EXPR, itype,
itercntbias, step);
itercntbias
= force_gimple_operand_gsi (&gsif, itercntbias, true,
NULL_TREE, true,
GSI_SAME_STMT);
itercnt = fold_build2 (PLUS_EXPR, itype, itercntbias, s0);
itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
}
else
itercnt = s0;
}
a = fold_build2 (MULT_EXPR, type,
fold_convert (type, itercnt),
fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
: POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (dest, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
if (fd->collapse > 1)
expand_omp_for_init_vars (fd, &gsi, counts, NULL, inner_stmt, startvar);
if (!broken_loop)
{
/* The code controlling the sequential loop goes in CONT_BB,
replacing the GIMPLE_OMP_CONTINUE. */
gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
vmain = gimple_omp_continue_control_use (cont_stmt);
vback = gimple_omp_continue_control_def (cont_stmt);
if (cond_var)
{
tree itype = TREE_TYPE (cond_var);
tree t2;
if (POINTER_TYPE_P (type)
|| TREE_CODE (n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR)
t2 = build_int_cst (itype, 1);
else
t2 = fold_convert (itype, step);
t2 = fold_build2 (PLUS_EXPR, itype, cond_var, t2);
t2 = force_gimple_operand_gsi (&gsi, t2, false,
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (cond_var, t2);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
}
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, step);
if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (vback, t);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
if (tree_int_cst_equal (fd->chunk_size, integer_one_node))
t = build2 (EQ_EXPR, boolean_type_node,
build_int_cst (itype, 0),
build_int_cst (itype, 1));
else
t = build2 (fd->loop.cond_code, boolean_type_node,
DECL_P (vback) && TREE_ADDRESSABLE (vback)
? t : vback, e);
gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
}
/* Remove GIMPLE_OMP_CONTINUE. */
gsi_remove (&gsi, true);
if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
collapse_bb = extract_omp_for_update_vars (fd, NULL, cont_bb, body_bb);
/* Trip update code goes into TRIP_UPDATE_BB. */
gsi = gsi_start_bb (trip_update_bb);
t = build_int_cst (itype, 1);
t = build2 (PLUS_EXPR, itype, trip_main, t);
assign_stmt = gimple_build_assign (trip_back, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
/* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
gsi = gsi_last_nondebug_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
{
t = gimple_omp_return_lhs (gsi_stmt (gsi));
if (fd->have_reductemp || fd->have_pointer_condtemp)
{
tree fn;
if (t)
fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
else
fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
gcall *g = gimple_build_call (fn, 0);
if (t)
{
gimple_call_set_lhs (g, t);
if (fd->have_reductemp)
gsi_insert_after (&gsi, gimple_build_assign (reductions,
NOP_EXPR, t),
GSI_SAME_STMT);
}
gsi_insert_after (&gsi, g, GSI_SAME_STMT);
}
else
gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT);
}
else if (fd->have_pointer_condtemp)
{
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
gcall *g = gimple_build_call (fn, 0);
gsi_insert_after (&gsi, g, GSI_SAME_STMT);
}
gsi_remove (&gsi, true);
/* Connect the new blocks. */
find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
if (!broken_loop)
{
se = find_edge (cont_bb, body_bb);
if (se == NULL)
{
se = BRANCH_EDGE (cont_bb);
gcc_assert (single_succ (se->dest) == body_bb);
}
if (gimple_omp_for_combined_p (fd->for_stmt))
{
remove_edge (se);
se = NULL;
}
else if (fd->collapse > 1)
{
remove_edge (se);
se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
}
else
se->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, trip_update_bb)->flags
= se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
redirect_edge_and_branch (single_succ_edge (trip_update_bb),
iter_part_bb);
}
if (gimple_in_ssa_p (cfun))
{
gphi_iterator psi;
gphi *phi;
edge re, ene;
edge_var_map *vm;
size_t i;
gcc_assert (fd->collapse == 1 && !broken_loop);
/* When we redirect the edge from trip_update_bb to iter_part_bb, we
remove arguments of the phi nodes in fin_bb. We need to create
appropriate phi nodes in iter_part_bb instead. */
se = find_edge (iter_part_bb, fin_bb);
re = single_succ_edge (trip_update_bb);
vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
ene = single_succ_edge (entry_bb);
psi = gsi_start_phis (fin_bb);
for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
gsi_next (&psi), ++i)
{
gphi *nphi;
location_t locus;
phi = psi.phi ();
if (operand_equal_p (gimple_phi_arg_def (phi, 0),
redirect_edge_var_map_def (vm), 0))
continue;
t = gimple_phi_result (phi);
gcc_assert (t == redirect_edge_var_map_result (vm));
if (!single_pred_p (fin_bb))
t = copy_ssa_name (t, phi);
nphi = create_phi_node (t, iter_part_bb);
t = PHI_ARG_DEF_FROM_EDGE (phi, se);
locus = gimple_phi_arg_location_from_edge (phi, se);
/* A special case -- fd->loop.v is not yet computed in
iter_part_bb, so we need to use vextra instead. */
if (t == fd->loop.v)
t = vextra;
add_phi_arg (nphi, t, ene, locus);
locus = redirect_edge_var_map_location (vm);
tree back_arg = redirect_edge_var_map_def (vm);
add_phi_arg (nphi, back_arg, re, locus);
edge ce = find_edge (cont_bb, body_bb);
if (ce == NULL)
{
ce = BRANCH_EDGE (cont_bb);
gcc_assert (single_succ (ce->dest) == body_bb);
ce = single_succ_edge (ce->dest);
}
gphi *inner_loop_phi = find_phi_with_arg_on_edge (back_arg, ce);
gcc_assert (inner_loop_phi != NULL);
add_phi_arg (inner_loop_phi, gimple_phi_result (nphi),
find_edge (seq_start_bb, body_bb), locus);
if (!single_pred_p (fin_bb))
add_phi_arg (phi, gimple_phi_result (nphi), se, locus);
}
gcc_assert (gsi_end_p (psi) && (head == NULL || i == head->length ()));
redirect_edge_var_map_clear (re);
if (single_pred_p (fin_bb))
while (1)
{
psi = gsi_start_phis (fin_bb);
if (gsi_end_p (psi))
break;
remove_phi_node (&psi, false);
}
/* Make phi node for trip. */
phi = create_phi_node (trip_main, iter_part_bb);
add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
UNKNOWN_LOCATION);
add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
UNKNOWN_LOCATION);
}
if (!broken_loop)
set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
recompute_dominator (CDI_DOMINATORS, iter_part_bb));
set_immediate_dominator (CDI_DOMINATORS, fin_bb,
recompute_dominator (CDI_DOMINATORS, fin_bb));
set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
recompute_dominator (CDI_DOMINATORS, seq_start_bb));
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
if (!broken_loop)
{
class loop *loop = body_bb->loop_father;
class loop *trip_loop = alloc_loop ();
trip_loop->header = iter_part_bb;
trip_loop->latch = trip_update_bb;
add_loop (trip_loop, iter_part_bb->loop_father);
if (loop != entry_bb->loop_father)
{
gcc_assert (loop->header == body_bb);
gcc_assert (loop->latch == region->cont
|| single_pred (loop->latch) == region->cont);
trip_loop->inner = loop;
return;
}
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
loop = alloc_loop ();
loop->header = body_bb;
if (collapse_bb == NULL)
loop->latch = cont_bb;
add_loop (loop, trip_loop);
}
}
}
/* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
loop. Given parameters:
for (V = N1; V cond N2; V += STEP) BODY;
where COND is "<" or ">", we generate pseudocode
V = N1;
goto L1;
L0:
BODY;
V += STEP;
L1:
if (V cond N2) goto L0; else goto L2;
L2:
For collapsed loops, emit the outer loops as scalar
and only try to vectorize the innermost loop. */
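/* E.g. "#pragma omp simd safelen(8)" around
   "for (i = 0; i < n; i++) a[i] += b[i];" is laid out as the
   L0/L1/L2 scheme above; the resulting CFG loop is then annotated
   with safelen 8 and, unless vectorization is disabled, with
   force_vectorize, which the tree vectorizer honors later
   (a sketch of the common case; see the loop setup at the end
   of this function).  */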
static void
expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
{
tree type, t;
basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
gimple_stmt_iterator gsi;
gimple *stmt;
gcond *cond_stmt;
bool broken_loop = region->cont == NULL;
edge e, ne;
tree *counts = NULL;
int i;
int safelen_int = INT_MAX;
bool dont_vectorize = false;
tree safelen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE_SAFELEN);
tree simduid = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__SIMDUID_);
tree ifc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE_IF);
tree simdlen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE_SIMDLEN);
tree condtemp = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__CONDTEMP_);
tree n1, n2;
tree cond_var = condtemp ? OMP_CLAUSE_DECL (condtemp) : NULL_TREE;
if (safelen)
{
poly_uint64 val;
safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
if (!poly_int_tree_p (safelen, &val))
safelen_int = 0;
else
safelen_int = MIN (constant_lower_bound (val), INT_MAX);
if (safelen_int == 1)
safelen_int = 0;
}
if ((ifc && integer_zerop (OMP_CLAUSE_IF_EXPR (ifc)))
|| (simdlen && integer_onep (OMP_CLAUSE_SIMDLEN_EXPR (simdlen))))
{
safelen_int = 0;
dont_vectorize = true;
}
type = TREE_TYPE (fd->loop.v);
entry_bb = region->entry;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
gcc_assert (broken_loop
|| BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
if (!broken_loop)
{
gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
l2_bb = BRANCH_EDGE (entry_bb)->dest;
}
else
{
BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
l1_bb = split_edge (BRANCH_EDGE (entry_bb));
l2_bb = single_succ (l1_bb);
}
exit_bb = region->exit;
l2_dom_bb = NULL;
gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
/* Not needed in SSA form right now. */
gcc_assert (!gimple_in_ssa_p (cfun));
if (fd->collapse > 1
&& (gimple_omp_for_combined_into_p (fd->for_stmt)
|| broken_loop))
{
int first_zero_iter = -1, dummy = -1;
basic_block zero_iter_bb = l2_bb, dummy_bb = NULL;
counts = XALLOCAVEC (tree, fd->collapse);
expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
zero_iter_bb, first_zero_iter,
dummy_bb, dummy, l2_dom_bb);
}
if (l2_dom_bb == NULL)
l2_dom_bb = l1_bb;
n1 = fd->loop.n1;
n2 = fd->loop.n2;
if (gimple_omp_for_combined_into_p (fd->for_stmt))
{
tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n1 = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n2 = OMP_CLAUSE_DECL (innerc);
}
tree step = fd->loop.step;
bool is_simt = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__SIMT_);
if (is_simt)
{
cfun->curr_properties &= ~PROP_gimple_lomp_dev;
is_simt = safelen_int > 1;
}
tree simt_lane = NULL_TREE, simt_maxlane = NULL_TREE;
if (is_simt)
{
simt_lane = create_tmp_var (unsigned_type_node);
gimple *g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0);
gimple_call_set_lhs (g, simt_lane);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
tree offset = fold_build2 (MULT_EXPR, TREE_TYPE (step), step,
fold_convert (TREE_TYPE (step), simt_lane));
n1 = fold_convert (type, n1);
if (POINTER_TYPE_P (type))
n1 = fold_build_pointer_plus (n1, offset);
else
n1 = fold_build2 (PLUS_EXPR, type, n1, fold_convert (type, offset));
/* Collapsed loops not handled for SIMT yet: limit to one lane only. */
if (fd->collapse > 1)
simt_maxlane = build_one_cst (unsigned_type_node);
else if (safelen_int < omp_max_simt_vf ())
simt_maxlane = build_int_cst (unsigned_type_node, safelen_int);
tree vf
= build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_GOMP_SIMT_VF,
unsigned_type_node, 0);
if (simt_maxlane)
vf = fold_build2 (MIN_EXPR, unsigned_type_node, vf, simt_maxlane);
vf = fold_convert (TREE_TYPE (step), vf);
step = fold_build2 (MULT_EXPR, TREE_TYPE (step), step, vf);
}
tree n2var = NULL_TREE;
tree n2v = NULL_TREE;
tree *nonrect_bounds = NULL;
tree min_arg1 = NULL_TREE, min_arg2 = NULL_TREE;
if (fd->collapse > 1)
{
if (broken_loop || gimple_omp_for_combined_into_p (fd->for_stmt))
{
if (fd->non_rect)
{
nonrect_bounds = XALLOCAVEC (tree, fd->last_nonrect + 1);
memset (nonrect_bounds, 0,
sizeof (tree) * (fd->last_nonrect + 1));
}
expand_omp_build_assign (&gsi, fd->loop.v, fold_convert (type, n1));
gcc_assert (entry_bb == gsi_bb (gsi));
gcc_assert (fd->for_stmt == gsi_stmt (gsi));
gsi_prev (&gsi);
entry_bb = split_block (entry_bb, gsi_stmt (gsi))->dest;
expand_omp_for_init_vars (fd, &gsi, counts, nonrect_bounds,
NULL, n1);
gsi = gsi_for_stmt (fd->for_stmt);
}
if (broken_loop)
;
else if (gimple_omp_for_combined_into_p (fd->for_stmt))
{
/* Compute in n2var the limit for the first innermost loop,
i.e. fd->loop.v + MIN (n2 - fd->loop.v, cnt),
where cnt is the number of iterations the loop would have if
all further iterations were assigned to the current task. */
n2var = create_tmp_var (type);
i = fd->collapse - 1;
tree itype = TREE_TYPE (fd->loops[i].v);
if (POINTER_TYPE_P (itype))
itype = signed_type_for (itype);
t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype,
fold_convert (itype, fd->loops[i].step), t);
t = fold_build2 (PLUS_EXPR, itype, t,
fold_convert (itype, fd->loops[i].n2));
if (fd->loops[i].m2)
{
tree t2 = fold_convert (itype,
fd->loops[i - fd->loops[i].outer].v);
tree t3 = fold_convert (itype, fd->loops[i].m2);
t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t), t2, t3);
t = fold_build2 (PLUS_EXPR, itype, t, t2);
}
t = fold_build2 (MINUS_EXPR, itype, t,
fold_convert (itype, fd->loops[i].v));
if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype,
fold_convert (itype,
fd->loops[i].step)));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
fold_convert (itype, fd->loops[i].step));
t = fold_convert (type, t);
tree t2 = fold_build2 (MINUS_EXPR, type, n2, n1);
min_arg1 = create_tmp_var (type);
expand_omp_build_assign (&gsi, min_arg1, t2);
min_arg2 = create_tmp_var (type);
expand_omp_build_assign (&gsi, min_arg2, t);
}
else
{
if (TREE_CODE (n2) == INTEGER_CST)
{
/* Indicate for lastprivate handling that at least one iteration
has been performed, without wasting runtime. */
if (integer_nonzerop (n2))
expand_omp_build_assign (&gsi, fd->loop.v,
fold_convert (type, n2));
else
/* Indicate that no iteration has been performed. */
expand_omp_build_assign (&gsi, fd->loop.v,
build_one_cst (type));
}
else
{
expand_omp_build_assign (&gsi, fd->loop.v,
build_zero_cst (type));
expand_omp_build_assign (&gsi, n2, build_one_cst (type));
}
for (i = 0; i < fd->collapse; i++)
{
t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
if (fd->loops[i].m1)
{
tree t2
= fold_convert (TREE_TYPE (t),
fd->loops[i - fd->loops[i].outer].v);
tree t3 = fold_convert (TREE_TYPE (t), fd->loops[i].m1);
t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t), t2, t3);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (t), t, t2);
}
expand_omp_build_assign (&gsi, fd->loops[i].v, t);
/* For normal non-combined collapsed loops just initialize
the outermost iterator in the entry_bb. */
if (!broken_loop)
break;
}
}
}
else
expand_omp_build_assign (&gsi, fd->loop.v, fold_convert (type, n1));
tree altv = NULL_TREE, altn2 = NULL_TREE;
if (fd->collapse == 1
&& !broken_loop
&& TREE_CODE (fd->loops[0].step) != INTEGER_CST)
{
/* The vectorizer currently punts on loops with non-constant steps
for the main IV (it can't compute the number of iterations and
gives up because of that). Since for OpenMP loops it is always
possible to compute the number of iterations upfront, use an
alternate IV as the loop iterator:
altn2 = n1 < n2 ? (n2 - n1 + step - 1) / step : 0;
for (i = n1, altv = 0; altv < altn2; altv++, i += step) */
altv = create_tmp_var (unsigned_type_for (TREE_TYPE (fd->loops[0].v)));
expand_omp_build_assign (&gsi, altv, build_zero_cst (TREE_TYPE (altv)));
tree itype = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (itype))
itype = signed_type_for (itype);
t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype,
fold_convert (itype, fd->loop.step), t);
t = fold_build2 (PLUS_EXPR, itype, t, fold_convert (itype, n2));
t = fold_build2 (MINUS_EXPR, itype, t,
fold_convert (itype, fd->loop.v));
if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype,
fold_convert (itype, fd->loop.step)));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
fold_convert (itype, fd->loop.step));
t = fold_convert (TREE_TYPE (altv), t);
altn2 = create_tmp_var (TREE_TYPE (altv));
expand_omp_build_assign (&gsi, altn2, t);
tree t2 = fold_convert (TREE_TYPE (fd->loop.v), n2);
t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
true, GSI_SAME_STMT);
t2 = fold_build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t2);
gassign *g = gimple_build_assign (altn2, COND_EXPR, t2, altn2,
build_zero_cst (TREE_TYPE (altv)));
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
else if (fd->collapse > 1
&& !broken_loop
&& !gimple_omp_for_combined_into_p (fd->for_stmt)
&& TREE_CODE (fd->loops[fd->collapse - 1].step) != INTEGER_CST)
{
altv = create_tmp_var (unsigned_type_for (TREE_TYPE (fd->loops[0].v)));
altn2 = create_tmp_var (TREE_TYPE (altv));
}
if (cond_var)
{
if (POINTER_TYPE_P (type)
|| TREE_CODE (n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR
|| tree_int_cst_sgn (n1) != 1)
expand_omp_build_assign (&gsi, cond_var,
build_one_cst (TREE_TYPE (cond_var)));
else
expand_omp_build_assign (&gsi, cond_var,
fold_convert (TREE_TYPE (cond_var), n1));
}
/* Remove the GIMPLE_OMP_FOR statement. */
gsi_remove (&gsi, true);
if (!broken_loop)
{
/* Code to control the increment goes in the CONT_BB. */
gsi = gsi_last_nondebug_bb (cont_bb);
stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
if (fd->collapse == 1
|| gimple_omp_for_combined_into_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (fd->loop.v, step);
else
t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step);
expand_omp_build_assign (&gsi, fd->loop.v, t);
}
else if (TREE_CODE (n2) != INTEGER_CST)
expand_omp_build_assign (&gsi, fd->loop.v, build_one_cst (type));
if (altv)
{
t = fold_build2 (PLUS_EXPR, TREE_TYPE (altv), altv,
build_one_cst (TREE_TYPE (altv)));
expand_omp_build_assign (&gsi, altv, t);
}
if (fd->collapse > 1)
{
i = fd->collapse - 1;
if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
{
t = fold_convert (sizetype, fd->loops[i].step);
t = fold_build_pointer_plus (fd->loops[i].v, t);
}
else
{
t = fold_convert (TREE_TYPE (fd->loops[i].v),
fd->loops[i].step);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
fd->loops[i].v, t);
}
expand_omp_build_assign (&gsi, fd->loops[i].v, t);
}
if (cond_var)
{
if (POINTER_TYPE_P (type)
|| TREE_CODE (n1) != INTEGER_CST
|| fd->loop.cond_code != LT_EXPR
|| tree_int_cst_sgn (n1) != 1)
t = fold_build2 (PLUS_EXPR, TREE_TYPE (cond_var), cond_var,
build_one_cst (TREE_TYPE (cond_var)));
else
t = fold_build2 (PLUS_EXPR, TREE_TYPE (cond_var), cond_var,
fold_convert (TREE_TYPE (cond_var), step));
expand_omp_build_assign (&gsi, cond_var, t);
}
/* Remove GIMPLE_OMP_CONTINUE. */
gsi_remove (&gsi, true);
}
/* Emit the condition in L1_BB. */
gsi = gsi_start_bb (l1_bb);
if (altv)
t = build2 (LT_EXPR, boolean_type_node, altv, altn2);
else if (fd->collapse > 1
&& !gimple_omp_for_combined_into_p (fd->for_stmt)
&& !broken_loop)
{
i = fd->collapse - 1;
tree itype = TREE_TYPE (fd->loops[i].v);
if (fd->loops[i].m2)
t = n2v = create_tmp_var (itype);
else
t = fold_convert (itype, fd->loops[i].n2);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
tree v = fd->loops[i].v;
if (DECL_P (v) && TREE_ADDRESSABLE (v))
v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t = build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
}
else
{
if (fd->collapse > 1 && !broken_loop)
t = n2var;
else
t = fold_convert (type, n2);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
tree v = fd->loop.v;
if (DECL_P (v) && TREE_ADDRESSABLE (v))
v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t = build2 (fd->loop.cond_code, boolean_type_node, v, t);
}
cond_stmt = gimple_build_cond_empty (t);
gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
NULL, NULL)
|| walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
NULL, NULL))
{
gsi = gsi_for_stmt (cond_stmt);
gimple_regimplify_operands (cond_stmt, &gsi);
}
/* Add 'V -= STEP * (SIMT_VF - 1)' after the loop. */
if (is_simt)
{
gsi = gsi_start_bb (l2_bb);
step = fold_build2 (MINUS_EXPR, TREE_TYPE (step), fd->loop.step, step);
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (fd->loop.v, step);
else
t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step);
expand_omp_build_assign (&gsi, fd->loop.v, t);
}
/* Remove GIMPLE_OMP_RETURN. */
gsi = gsi_last_nondebug_bb (exit_bb);
gsi_remove (&gsi, true);
/* Connect the new blocks. */
remove_edge (FALLTHRU_EDGE (entry_bb));
if (!broken_loop)
{
remove_edge (BRANCH_EDGE (entry_bb));
make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
e = BRANCH_EDGE (l1_bb);
ne = FALLTHRU_EDGE (l1_bb);
e->flags = EDGE_TRUE_VALUE;
}
else
{
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
ne = single_succ_edge (l1_bb);
e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
}
ne->flags = EDGE_FALSE_VALUE;
e->probability = profile_probability::guessed_always ().apply_scale (7, 8);
ne->probability = e->probability.invert ();
set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
if (simt_maxlane)
{
cond_stmt = gimple_build_cond (LT_EXPR, simt_lane, simt_maxlane,
NULL_TREE, NULL_TREE);
gsi = gsi_last_bb (entry_bb);
gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
make_edge (entry_bb, l2_bb, EDGE_FALSE_VALUE);
FALLTHRU_EDGE (entry_bb)->flags = EDGE_TRUE_VALUE;
FALLTHRU_EDGE (entry_bb)->probability
= profile_probability::guessed_always ().apply_scale (7, 8);
BRANCH_EDGE (entry_bb)->probability
= FALLTHRU_EDGE (entry_bb)->probability.invert ();
l2_dom_bb = entry_bb;
}
set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
if (!broken_loop && fd->collapse > 1)
{
basic_block last_bb = l1_bb;
basic_block init_bb = NULL;
for (i = fd->collapse - 2; i >= 0; i--)
{
tree nextn2v = NULL_TREE;
if (EDGE_SUCC (last_bb, 0)->flags & EDGE_FALSE_VALUE)
e = EDGE_SUCC (last_bb, 0);
else
e = EDGE_SUCC (last_bb, 1);
basic_block bb = split_edge (e);
if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
{
t = fold_convert (sizetype, fd->loops[i].step);
t = fold_build_pointer_plus (fd->loops[i].v, t);
}
else
{
t = fold_convert (TREE_TYPE (fd->loops[i].v),
fd->loops[i].step);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
fd->loops[i].v, t);
}
gsi = gsi_after_labels (bb);
expand_omp_build_assign (&gsi, fd->loops[i].v, t);
bb = split_block (bb, last_stmt (bb))->dest;
gsi = gsi_start_bb (bb);
tree itype = TREE_TYPE (fd->loops[i].v);
if (fd->loops[i].m2)
t = nextn2v = create_tmp_var (itype);
else
t = fold_convert (itype, fd->loops[i].n2);
t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
tree v = fd->loops[i].v;
if (DECL_P (v) && TREE_ADDRESSABLE (v))
v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
t = build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
cond_stmt = gimple_build_cond_empty (t);
gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
expand_omp_regimplify_p, NULL, NULL)
|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
expand_omp_regimplify_p, NULL, NULL))
{
gsi = gsi_for_stmt (cond_stmt);
gimple_regimplify_operands (cond_stmt, &gsi);
}
ne = single_succ_edge (bb);
ne->flags = EDGE_FALSE_VALUE;
init_bb = create_empty_bb (bb);
set_immediate_dominator (CDI_DOMINATORS, init_bb, bb);
add_bb_to_loop (init_bb, bb->loop_father);
e = make_edge (bb, init_bb, EDGE_TRUE_VALUE);
e->probability
= profile_probability::guessed_always ().apply_scale (7, 8);
ne->probability = e->probability.invert ();
gsi = gsi_after_labels (init_bb);
t = fold_convert (TREE_TYPE (fd->loops[i + 1].v),
fd->loops[i + 1].n1);
if (fd->loops[i + 1].m1)
{
tree t2 = fold_convert (TREE_TYPE (t),
fd->loops[i + 1
- fd->loops[i + 1].outer].v);
tree t3 = fold_convert (TREE_TYPE (t), fd->loops[i + 1].m1);
t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t), t2, t3);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (t), t, t2);
}
expand_omp_build_assign (&gsi, fd->loops[i + 1].v, t);
if (fd->loops[i + 1].m2)
{
if (i + 2 == fd->collapse && (n2var || altv))
{
gcc_assert (n2v == NULL_TREE);
n2v = create_tmp_var (TREE_TYPE (fd->loops[i + 1].v));
}
t = fold_convert (TREE_TYPE (fd->loops[i + 1].v),
fd->loops[i + 1].n2);
tree t2 = fold_convert (TREE_TYPE (t),
fd->loops[i + 1
- fd->loops[i + 1].outer].v);
tree t3 = fold_convert (TREE_TYPE (t), fd->loops[i + 1].m2);
t2 = fold_build2 (MULT_EXPR, TREE_TYPE (t), t2, t3);
t = fold_build2 (PLUS_EXPR, TREE_TYPE (t), t, t2);
expand_omp_build_assign (&gsi, n2v, t);
}
if (i + 2 == fd->collapse && n2var)
{
/* For composite simd, n2 is the first iteration the current
task is not supposed to handle, so we effectively want to use
for (V3 = N31; V < N2 && V3 < N32; V++, V3 += STEP3)
as the vectorized loop. Except the vectorizer will not
vectorize that, so instead compute N2VAR as
N2VAR = V + MIN (N2 - V, COUNTS3) and use
for (V3 = N31; V < N2VAR; V++, V3 += STEP3)
as the loop to vectorize. */
tree t2 = fold_build2 (MINUS_EXPR, type, n2, fd->loop.v);
if (fd->loops[i + 1].m1 || fd->loops[i + 1].m2)
{
t = build_int_cst (itype, (fd->loops[i + 1].cond_code
== LT_EXPR ? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype,
fold_convert (itype,
fd->loops[i + 1].step), t);
if (fd->loops[i + 1].m2)
t = fold_build2 (PLUS_EXPR, itype, t, n2v);
else
t = fold_build2 (PLUS_EXPR, itype, t,
fold_convert (itype,
fd->loops[i + 1].n2));
t = fold_build2 (MINUS_EXPR, itype, t,
fold_convert (itype, fd->loops[i + 1].v));
tree step = fold_convert (itype, fd->loops[i + 1].step);
if (TYPE_UNSIGNED (itype)
&& fd->loops[i + 1].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, step));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (type, t);
}
else
t = counts[i + 1];
expand_omp_build_assign (&gsi, min_arg1, t2);
expand_omp_build_assign (&gsi, min_arg2, t);
e = split_block (init_bb, last_stmt (init_bb));
gsi = gsi_after_labels (e->dest);
init_bb = e->dest;
remove_edge (FALLTHRU_EDGE (entry_bb));
make_edge (entry_bb, init_bb, EDGE_FALLTHRU);
set_immediate_dominator (CDI_DOMINATORS, init_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, l1_bb, init_bb);
t = fold_build2 (MIN_EXPR, type, min_arg1, min_arg2);
t = fold_build2 (PLUS_EXPR, type, fd->loop.v, t);
expand_omp_build_assign (&gsi, n2var, t);
}
if (i + 2 == fd->collapse && altv)
{
/* The vectorizer currently punts on loops with non-constant
steps for the main IV (it can't compute the number of
iterations and gives up because of that). Since for OpenMP
loops it is always possible to compute the number of
iterations upfront, use an alternate IV as the loop iterator. */
expand_omp_build_assign (&gsi, altv,
build_zero_cst (TREE_TYPE (altv)));
tree itype = TREE_TYPE (fd->loops[i + 1].v);
if (POINTER_TYPE_P (itype))
itype = signed_type_for (itype);
t = build_int_cst (itype, (fd->loops[i + 1].cond_code == LT_EXPR
? -1 : 1));
t = fold_build2 (PLUS_EXPR, itype,
fold_convert (itype, fd->loops[i + 1].step), t);
t = fold_build2 (PLUS_EXPR, itype, t,
fold_convert (itype,
fd->loops[i + 1].m2
? n2v : fd->loops[i + 1].n2));
t = fold_build2 (MINUS_EXPR, itype, t,
fold_convert (itype, fd->loops[i + 1].v));
tree step = fold_convert (itype, fd->loops[i + 1].step);
if (TYPE_UNSIGNED (itype)
&& fd->loops[i + 1].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype, step));
else
t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (TREE_TYPE (altv), t);
expand_omp_build_assign (&gsi, altn2, t);
tree t2 = fold_convert (TREE_TYPE (fd->loops[i + 1].v),
fd->loops[i + 1].m2
? n2v : fd->loops[i + 1].n2);
t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
true, GSI_SAME_STMT);
t2 = fold_build2 (fd->loops[i + 1].cond_code, boolean_type_node,
fd->loops[i + 1].v, t2);
gassign *g
= gimple_build_assign (altn2, COND_EXPR, t2, altn2,
build_zero_cst (TREE_TYPE (altv)));
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
n2v = nextn2v;
make_edge (init_bb, last_bb, EDGE_FALLTHRU);
if (!gimple_omp_for_combined_into_p (fd->for_stmt))
{
e = find_edge (entry_bb, last_bb);
redirect_edge_succ (e, bb);
set_immediate_dominator (CDI_DOMINATORS, bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, last_bb, init_bb);
}
last_bb = bb;
}
}
if (!broken_loop)
{
class loop *loop = alloc_loop ();
loop->header = l1_bb;
loop->latch = cont_bb;
add_loop (loop, l1_bb->loop_father);
loop->safelen = safelen_int;
if (simduid)
{
loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
cfun->has_simduid_loops = true;
}
/* Unless -fno-tree-loop-vectorize is in effect, hint that we
want to vectorize the loop. */
if ((flag_tree_loop_vectorize
|| !global_options_set.x_flag_tree_loop_vectorize)
&& flag_tree_loop_optimize
&& loop->safelen > 1)
{
loop->force_vectorize = true;
if (simdlen && tree_fits_uhwi_p (OMP_CLAUSE_SIMDLEN_EXPR (simdlen)))
{
unsigned HOST_WIDE_INT v
= tree_to_uhwi (OMP_CLAUSE_SIMDLEN_EXPR (simdlen));
if (v < INT_MAX && v <= (unsigned HOST_WIDE_INT) loop->safelen)
loop->simdlen = v;
}
cfun->has_force_vectorize_loops = true;
}
else if (dont_vectorize)
loop->dont_vectorize = true;
}
else if (simduid)
cfun->has_simduid_loops = true;
}
/* The taskloop construct is represented after gimplification as two
GIMPLE_OMP_FOR constructs with a GIMPLE_OMP_TASK sandwiched between
them. This routine expands the outer GIMPLE_OMP_FOR, which should
just compute all the needed loop temporaries for GIMPLE_OMP_TASK. */
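/* Schematically, after gimplification a "#pragma omp taskloop" is
   therefore nested as
     GIMPLE_OMP_FOR      <- outer; expanded here, fills _looptemp_s
       GIMPLE_OMP_TASK   <- with gimple_omp_task_taskloop_p set
	 GIMPLE_OMP_FOR  <- inner; the loop each task actually runs
	   BODY  */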
static void
expand_omp_taskloop_for_outer (struct omp_region *region,
struct omp_for_data *fd,
gimple *inner_stmt)
{
tree type, bias = NULL_TREE;
basic_block entry_bb, cont_bb, exit_bb;
gimple_stmt_iterator gsi;
gassign *assign_stmt;
tree *counts = NULL;
int i;
gcc_assert (inner_stmt);
gcc_assert (region->cont);
gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_TASK
&& gimple_omp_task_taskloop_p (inner_stmt));
type = TREE_TYPE (fd->loop.v);
/* See if we need to bias by LLONG_MIN. */
if (fd->iter_type == long_long_unsigned_type_node
&& TREE_CODE (type) == INTEGER_TYPE
&& !TYPE_UNSIGNED (type))
{
tree n1, n2;
if (fd->loop.cond_code == LT_EXPR)
{
n1 = fd->loop.n1;
n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
}
else
{
n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
n2 = fd->loop.n1;
}
if (TREE_CODE (n1) != INTEGER_CST
|| TREE_CODE (n2) != INTEGER_CST
|| ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
}
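/* E.g. for a signed long long IV running over [-5, 5) the bounds
   are biased by LLONG_MIN below; in the unsigned iter_type this
   flips the sign bit, mapping the signed range onto unsigned
   values while preserving their order.  */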
entry_bb = region->entry;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
exit_bb = region->exit;
gsi = gsi_last_nondebug_bb (entry_bb);
gimple *for_stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR);
if (fd->collapse > 1)
{
int first_zero_iter = -1, dummy = -1;
basic_block zero_iter_bb = NULL, dummy_bb = NULL, l2_dom_bb = NULL;
counts = XALLOCAVEC (tree, fd->collapse);
expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
zero_iter_bb, first_zero_iter,
dummy_bb, dummy, l2_dom_bb);
if (zero_iter_bb)
{
/* Some counts[i] vars might be uninitialized if
some loop has zero iterations. But the body shouldn't
be executed in that case, so just avoid uninit warnings. */
for (i = first_zero_iter; i < fd->collapse; i++)
if (SSA_VAR_P (counts[i]))
TREE_NO_WARNING (counts[i]) = 1;
gsi_prev (&gsi);
edge e = split_block (entry_bb, gsi_stmt (gsi));
entry_bb = e->dest;
make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
gsi = gsi_last_bb (entry_bb);
set_immediate_dominator (CDI_DOMINATORS, entry_bb,
get_immediate_dominator (CDI_DOMINATORS,
zero_iter_bb));
}
}
tree t0, t1;
t1 = fd->loop.n2;
t0 = fd->loop.n1;
if (POINTER_TYPE_P (TREE_TYPE (t0))
&& TYPE_PRECISION (TREE_TYPE (t0))
!= TYPE_PRECISION (fd->iter_type))
{
/* Avoid casting pointers to an integer of a different size. */
tree itype = signed_type_for (type);
t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
}
else
{
t1 = fold_convert (fd->iter_type, t1);
t0 = fold_convert (fd->iter_type, t0);
}
if (bias)
{
t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
}
tree innerc = omp_find_clause (gimple_omp_task_clauses (inner_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
tree startvar = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
tree endvar = OMP_CLAUSE_DECL (innerc);
if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
{
innerc = find_lastprivate_looptemp (fd, innerc);
if (innerc)
{
/* If needed (inner taskloop has lastprivate clause), propagate
down the total number of iterations. */
tree t = force_gimple_operand_gsi (&gsi, fd->loop.n2, false,
NULL_TREE, false,
GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
}
t0 = force_gimple_operand_gsi (&gsi, t0, false, NULL_TREE, false,
GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (startvar, t0);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
t1 = force_gimple_operand_gsi (&gsi, t1, false, NULL_TREE, false,
GSI_CONTINUE_LINKING);
assign_stmt = gimple_build_assign (endvar, t1);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
if (fd->collapse > 1)
expand_omp_for_init_vars (fd, &gsi, counts, NULL, inner_stmt, startvar);
/* Remove the GIMPLE_OMP_FOR statement. */
gsi = gsi_for_stmt (for_stmt);
gsi_remove (&gsi, true);
gsi = gsi_last_nondebug_bb (cont_bb);
gsi_remove (&gsi, true);
gsi = gsi_last_nondebug_bb (exit_bb);
gsi_remove (&gsi, true);
FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
remove_edge (BRANCH_EDGE (entry_bb));
FALLTHRU_EDGE (cont_bb)->probability = profile_probability::always ();
remove_edge (BRANCH_EDGE (cont_bb));
set_immediate_dominator (CDI_DOMINATORS, exit_bb, cont_bb);
set_immediate_dominator (CDI_DOMINATORS, region->entry,
recompute_dominator (CDI_DOMINATORS, region->entry));
}
/* The taskloop construct is represented after gimplification as two
GIMPLE_OMP_FOR constructs with a GIMPLE_OMP_TASK sandwiched between
them. This routine expands the inner GIMPLE_OMP_FOR. The
GOMP_taskloop{,_ull} function arranges for each task to be given
just a single range of iterations. */
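/* Consequently the expansion below does not compute the iteration
   range itself; it reads the per-task [start, end) pair from the
   two _looptemp_ clauses, which (under the division of labor
   described above) the GOMP_taskloop{,_ull} runtime call arranges
   to be filled in for each task.  */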
static void
expand_omp_taskloop_for_inner (struct omp_region *region,
struct omp_for_data *fd,
gimple *inner_stmt)
{
tree e, t, type, itype, vmain, vback, bias = NULL_TREE;
basic_block entry_bb, exit_bb, body_bb, cont_bb, collapse_bb = NULL;
basic_block fin_bb;
gimple_stmt_iterator gsi;
edge ep;
bool broken_loop = region->cont == NULL;
tree *counts = NULL;
tree n1, n2, step;
itype = type = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (type))
itype = signed_type_for (type);
/* See if we need to bias by LLONG_MIN. */
if (fd->iter_type == long_long_unsigned_type_node
&& TREE_CODE (type) == INTEGER_TYPE
&& !TYPE_UNSIGNED (type))
{
tree n1, n2;
if (fd->loop.cond_code == LT_EXPR)
{
n1 = fd->loop.n1;
n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
}
else
{
n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
n2 = fd->loop.n1;
}
if (TREE_CODE (n1) != INTEGER_CST
|| TREE_CODE (n2) != INTEGER_CST
|| ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
}
entry_bb = region->entry;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
fin_bb = BRANCH_EDGE (entry_bb)->dest;
gcc_assert (broken_loop
|| (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
body_bb = FALLTHRU_EDGE (entry_bb)->dest;
if (!broken_loop)
{
gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
}
exit_bb = region->exit;
/* Iteration space partitioning goes in ENTRY_BB. */
gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
if (fd->collapse > 1)
{
int first_zero_iter = -1, dummy = -1;
basic_block l2_dom_bb = NULL, dummy_bb = NULL;
counts = XALLOCAVEC (tree, fd->collapse);
expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
fin_bb, first_zero_iter,
dummy_bb, dummy, l2_dom_bb);
t = NULL_TREE;
}
else
t = integer_one_node;
step = fd->loop.step;
tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n1 = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
n2 = OMP_CLAUSE_DECL (innerc);
if (bias)
{
n1 = fold_build2 (PLUS_EXPR, fd->iter_type, n1, bias);
n2 = fold_build2 (PLUS_EXPR, fd->iter_type, n2, bias);
}
n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
true, NULL_TREE, true, GSI_SAME_STMT);
n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
true, NULL_TREE, true, GSI_SAME_STMT);
step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
true, NULL_TREE, true, GSI_SAME_STMT);
tree startvar = fd->loop.v;
tree endvar = NULL_TREE;
if (gimple_omp_for_combined_p (fd->for_stmt))
{
tree clauses = gimple_omp_for_clauses (inner_stmt);
tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
startvar = OMP_CLAUSE_DECL (innerc);
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
endvar = OMP_CLAUSE_DECL (innerc);
}
t = fold_convert (TREE_TYPE (startvar), n1);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (startvar)
&& TREE_ADDRESSABLE (startvar),
NULL_TREE, false, GSI_CONTINUE_LINKING);
gimple *assign_stmt = gimple_build_assign (startvar, t);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
t = fold_convert (TREE_TYPE (startvar), n2);
e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
if (endvar)
{
assign_stmt = gimple_build_assign (endvar, e);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
assign_stmt = gimple_build_assign (fd->loop.v, e);
else
assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
}
tree *nonrect_bounds = NULL;
if (fd->collapse > 1)
{
if (fd->non_rect)
{
nonrect_bounds = XALLOCAVEC (tree, fd->last_nonrect + 1);
memset (nonrect_bounds, 0, sizeof (tree) * (fd->last_nonrect + 1));
}
gcc_assert (gsi_bb (gsi) == entry_bb);
expand_omp_for_init_vars (fd, &gsi, counts, nonrect_bounds, inner_stmt,
startvar);
entry_bb = gsi_bb (gsi);
}
if (!broken_loop)
{
/* The code controlling the sequential loop replaces the
GIMPLE_OMP_CONTINUE. */
gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
vmain = gimple_omp_continue_control_use (cont_stmt);
vback = gimple_omp_continue_control_def (cont_stmt);
if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, step);
else
t = fold_build2 (PLUS_EXPR, type, vmain, step);
t = force_gimple_operand_gsi (&gsi, t,
DECL_P (vback)
&& TREE_ADDRESSABLE (vback),
NULL_TREE, true, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (vback, t);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
t = build2 (fd->loop.cond_code, boolean_type_node,
DECL_P (vback) && TREE_ADDRESSABLE (vback)
? t : vback, e);
gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
}
/* Remove the GIMPLE_OMP_CONTINUE statement. */
gsi_remove (&gsi, true);
if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
collapse_bb = extract_omp_for_update_vars (fd, nonrect_bounds,
cont_bb, body_bb);
}
/* Remove the GIMPLE_OMP_FOR statement. */
gsi = gsi_for_stmt (fd->for_stmt);
gsi_remove (&gsi, true);
/* Remove the GIMPLE_OMP_RETURN statement. */
gsi = gsi_last_nondebug_bb (exit_bb);
gsi_remove (&gsi, true);
FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
if (!broken_loop)
remove_edge (BRANCH_EDGE (entry_bb));
else
{
remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb));
region->outer->cont = NULL;
}
/* Connect all the blocks. */
if (!broken_loop)
{
ep = find_edge (cont_bb, body_bb);
if (gimple_omp_for_combined_p (fd->for_stmt))
{
remove_edge (ep);
ep = NULL;
}
else if (fd->collapse > 1)
{
remove_edge (ep);
ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
}
else
ep->flags = EDGE_TRUE_VALUE;
find_edge (cont_bb, fin_bb)->flags
= ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
}
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
if (!broken_loop)
set_immediate_dominator (CDI_DOMINATORS, fin_bb,
recompute_dominator (CDI_DOMINATORS, fin_bb));
if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
{
class loop *loop = alloc_loop ();
loop->header = body_bb;
if (collapse_bb == NULL)
loop->latch = cont_bb;
add_loop (loop, body_bb->loop_father);
}
}
/* A subroutine of expand_omp_for. Generate code for an OpenACC
partitioned loop. The lowering here is abstracted, in that the
loop parameters are passed through internal functions, which are
further lowered by oacc_device_lower, once we get to the target
compiler. The loop is of the form:
for (V = B; V LTGT E; V += S) {BODY}
where LTGT is < or >. We may have a specified chunking size, CHUNK_SIZE
(constant 0 for no chunking) and we will have a GWV partitioning
mask, specifying dimensions over which the loop is to be
partitioned (see note below). We generate code that looks like
(this ignores tiling):
<entry_bb> [incoming FALL->body, BRANCH->exit]
typedef signedintify (typeof (V)) T; // underlying signed integral type
T range = E - B;
T chunk_no = 0;
T DIR = LTGT == '<' ? +1 : -1;
T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV);
T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV);
<head_bb> [created by splitting end of entry_bb]
T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no);
T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset);
if (!(offset LTGT bound)) goto bottom_bb;
<body_bb> [incoming]
V = B + offset;
{BODY}
<cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb]
offset += step;
if (offset LTGT bound) goto body_bb; [*]
<bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb
chunk_no++;
if (chunk_no < chunk_max) goto head_bb;
<exit_bb> [incoming]
V = B + ((range -/+ 1) / S +/- 1) * S [*]
[*] Needed if V live at end of loop. */
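/* As a concrete (illustrative) example:

       #pragma acc parallel loop
       for (v = 0; v < n; v++)
         body (v);

   has B = 0, E = n, S = 1 and LTGT = '<', so range = n; the
   IFN_GOACC_LOOP_* calls emitted below stay abstract here and are only
   resolved into per-gang/worker/vector offsets, bounds and steps by
   oacc_device_lower in the target compiler. */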
static void
expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
{
tree v = fd->loop.v;
enum tree_code cond_code = fd->loop.cond_code;
enum tree_code plus_code = PLUS_EXPR;
tree chunk_size = integer_minus_one_node;
tree gwv = integer_zero_node;
tree iter_type = TREE_TYPE (v);
tree diff_type = iter_type;
tree plus_type = iter_type;
struct oacc_collapse *counts = NULL;
gcc_checking_assert (gimple_omp_for_kind (fd->for_stmt)
== GF_OMP_FOR_KIND_OACC_LOOP);
gcc_assert (!gimple_omp_for_combined_into_p (fd->for_stmt));
gcc_assert (cond_code == LT_EXPR || cond_code == GT_EXPR);
if (POINTER_TYPE_P (iter_type))
{
plus_code = POINTER_PLUS_EXPR;
plus_type = sizetype;
}
if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
diff_type = signed_type_for (diff_type);
if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node))
diff_type = integer_type_node;
basic_block entry_bb = region->entry; /* BB ending in OMP_FOR */
basic_block exit_bb = region->exit; /* BB ending in OMP_RETURN */
basic_block cont_bb = region->cont; /* BB ending in OMP_CONTINUE */
basic_block bottom_bb = NULL;
/* entry_bb has two successors; the branch edge is to the exit
block, fallthrough edge to body. */
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2
&& BRANCH_EDGE (entry_bb)->dest == exit_bb);
/* If cont_bb is non-NULL, it has two successors. The branch successor
goes to body_bb, or to a block whose only successor is body_bb. Its
fallthrough successor is the final block (the same as the branch
successor of entry_bb). */
if (cont_bb)
{
basic_block body_bb = FALLTHRU_EDGE (entry_bb)->dest;
basic_block bed = BRANCH_EDGE (cont_bb)->dest;
gcc_assert (FALLTHRU_EDGE (cont_bb)->dest == exit_bb);
gcc_assert (bed == body_bb || single_succ_edge (bed)->dest == body_bb);
}
else
gcc_assert (!gimple_in_ssa_p (cfun));
/* The exit block only has entry_bb and cont_bb as predecessors. */
gcc_assert (EDGE_COUNT (exit_bb->preds) == 1 + (cont_bb != NULL));
tree chunk_no;
tree chunk_max = NULL_TREE;
tree bound, offset;
tree step = create_tmp_var (diff_type, ".step");
bool up = cond_code == LT_EXPR;
tree dir = build_int_cst (diff_type, up ? +1 : -1);
bool chunking = !gimple_in_ssa_p (cfun);
bool negating;
/* Tiling vars. */
tree tile_size = NULL_TREE;
tree element_s = NULL_TREE;
tree e_bound = NULL_TREE, e_offset = NULL_TREE, e_step = NULL_TREE;
basic_block elem_body_bb = NULL;
basic_block elem_cont_bb = NULL;
/* SSA instances. */
tree offset_incr = NULL_TREE;
tree offset_init = NULL_TREE;
gimple_stmt_iterator gsi;
gassign *ass;
gcall *call;
gimple *stmt;
tree expr;
location_t loc;
edge split, be, fte;
/* Split the end of entry_bb to create head_bb. */
split = split_block (entry_bb, last_stmt (entry_bb));
basic_block head_bb = split->dest;
entry_bb = split->src;
/* Chunk setup goes at end of entry_bb, replacing the omp_for. */
gsi = gsi_last_nondebug_bb (entry_bb);
gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi));
loc = gimple_location (for_stmt);
if (gimple_in_ssa_p (cfun))
{
offset_init = gimple_omp_for_index (for_stmt, 0);
gcc_assert (integer_zerop (fd->loop.n1));
/* The SSA parallelizer does gang parallelism. */
gwv = build_int_cst (integer_type_node, GOMP_DIM_MASK (GOMP_DIM_GANG));
}
if (fd->collapse > 1 || fd->tiling)
{
gcc_assert (!gimple_in_ssa_p (cfun) && up);
counts = XALLOCAVEC (struct oacc_collapse, fd->collapse);
tree total = expand_oacc_collapse_init (fd, &gsi, counts,
TREE_TYPE (fd->loop.n2), loc);
if (SSA_VAR_P (fd->loop.n2))
{
total = force_gimple_operand_gsi (&gsi, total, false, NULL_TREE,
true, GSI_SAME_STMT);
ass = gimple_build_assign (fd->loop.n2, total);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
}
}
tree b = fd->loop.n1;
tree e = fd->loop.n2;
tree s = fd->loop.step;
b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT);
e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT);
/* Convert the step, avoiding possible unsigned->signed overflow. */
negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
if (negating)
s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
s = fold_convert (diff_type, s);
if (negating)
s = fold_build1 (NEGATE_EXPR, diff_type, s);
s = force_gimple_operand_gsi (&gsi, s, true, NULL_TREE, true, GSI_SAME_STMT);
if (!chunking)
chunk_size = integer_zero_node;
expr = fold_convert (diff_type, chunk_size);
chunk_size = force_gimple_operand_gsi (&gsi, expr, true,
NULL_TREE, true, GSI_SAME_STMT);
if (fd->tiling)
{
/* Determine the tile size and element step,
modify the outer loop step size. */
tile_size = create_tmp_var (diff_type, ".tile_size");
expr = build_int_cst (diff_type, 1);
for (int ix = 0; ix < fd->collapse; ix++)
expr = fold_build2 (MULT_EXPR, diff_type, counts[ix].tile, expr);
expr = force_gimple_operand_gsi (&gsi, expr, true,
NULL_TREE, true, GSI_SAME_STMT);
ass = gimple_build_assign (tile_size, expr);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
element_s = create_tmp_var (diff_type, ".element_s");
ass = gimple_build_assign (element_s, s);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
expr = fold_build2 (MULT_EXPR, diff_type, s, tile_size);
s = force_gimple_operand_gsi (&gsi, expr, true,
NULL_TREE, true, GSI_SAME_STMT);
}
/* Determine the range, avoiding possible unsigned->signed overflow. */
negating = !up && TYPE_UNSIGNED (iter_type);
expr = fold_build2 (MINUS_EXPR, plus_type,
fold_convert (plus_type, negating ? b : e),
fold_convert (plus_type, negating ? e : b));
expr = fold_convert (diff_type, expr);
if (negating)
expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
tree range = force_gimple_operand_gsi (&gsi, expr, true,
NULL_TREE, true, GSI_SAME_STMT);
chunk_no = build_int_cst (diff_type, 0);
if (chunking)
{
gcc_assert (!gimple_in_ssa_p (cfun));
expr = chunk_no;
chunk_max = create_tmp_var (diff_type, ".chunk_max");
chunk_no = create_tmp_var (diff_type, ".chunk_no");
ass = gimple_build_assign (chunk_no, expr);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
build_int_cst (integer_type_node,
IFN_GOACC_LOOP_CHUNKS),
dir, range, s, chunk_size, gwv);
gimple_call_set_lhs (call, chunk_max);
gimple_set_location (call, loc);
gsi_insert_before (&gsi, call, GSI_SAME_STMT);
}
else
chunk_size = chunk_no;
call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
build_int_cst (integer_type_node,
IFN_GOACC_LOOP_STEP),
dir, range, s, chunk_size, gwv);
gimple_call_set_lhs (call, step);
gimple_set_location (call, loc);
gsi_insert_before (&gsi, call, GSI_SAME_STMT);
/* Remove the GIMPLE_OMP_FOR. */
gsi_remove (&gsi, true);
/* Fixup edges from head_bb. */
be = BRANCH_EDGE (head_bb);
fte = FALLTHRU_EDGE (head_bb);
be->flags |= EDGE_FALSE_VALUE;
fte->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;
basic_block body_bb = fte->dest;
if (gimple_in_ssa_p (cfun))
{
gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
offset = gimple_omp_continue_control_use (cont_stmt);
offset_incr = gimple_omp_continue_control_def (cont_stmt);
}
else
{
offset = create_tmp_var (diff_type, ".offset");
offset_init = offset_incr = offset;
}
bound = create_tmp_var (TREE_TYPE (offset), ".bound");
/* Loop offset & bound go into head_bb. */
gsi = gsi_start_bb (head_bb);
call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
build_int_cst (integer_type_node,
IFN_GOACC_LOOP_OFFSET),
dir, range, s,
chunk_size, gwv, chunk_no);
gimple_call_set_lhs (call, offset_init);
gimple_set_location (call, loc);
gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
build_int_cst (integer_type_node,
IFN_GOACC_LOOP_BOUND),
dir, range, s,
chunk_size, gwv, offset_init);
gimple_call_set_lhs (call, bound);
gimple_set_location (call, loc);
gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
expr = build2 (cond_code, boolean_type_node, offset_init, bound);
gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
GSI_CONTINUE_LINKING);
/* V assignment goes into body_bb. */
if (!gimple_in_ssa_p (cfun))
{
gsi = gsi_start_bb (body_bb);
expr = build2 (plus_code, iter_type, b,
fold_convert (plus_type, offset));
expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
true, GSI_SAME_STMT);
ass = gimple_build_assign (v, expr);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
if (fd->collapse > 1 || fd->tiling)
expand_oacc_collapse_vars (fd, false, &gsi, counts, v);
if (fd->tiling)
{
/* Determine the range of the element loop -- usually simply
the tile_size, but could be smaller if the final
iteration of the outer loop is a partial tile. */
tree e_range = create_tmp_var (diff_type, ".e_range");
expr = build2 (MIN_EXPR, diff_type,
build2 (MINUS_EXPR, diff_type, bound, offset),
build2 (MULT_EXPR, diff_type, tile_size,
element_s));
expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
true, GSI_SAME_STMT);
ass = gimple_build_assign (e_range, expr);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
/* Determine bound, offset & step of inner loop. */
e_bound = create_tmp_var (diff_type, ".e_bound");
e_offset = create_tmp_var (diff_type, ".e_offset");
e_step = create_tmp_var (diff_type, ".e_step");
/* Mark these as element loops. */
tree t, e_gwv = integer_minus_one_node;
tree chunk = build_int_cst (diff_type, 0); /* Never chunked. */
t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_OFFSET);
call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range,
element_s, chunk, e_gwv, chunk);
gimple_call_set_lhs (call, e_offset);
gimple_set_location (call, loc);
gsi_insert_before (&gsi, call, GSI_SAME_STMT);
t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_BOUND);
call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range,
element_s, chunk, e_gwv, e_offset);
gimple_call_set_lhs (call, e_bound);
gimple_set_location (call, loc);
gsi_insert_before (&gsi, call, GSI_SAME_STMT);
t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_STEP);
call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, t, dir, e_range,
element_s, chunk, e_gwv);
gimple_call_set_lhs (call, e_step);
gimple_set_location (call, loc);
gsi_insert_before (&gsi, call, GSI_SAME_STMT);
/* Add test and split block. */
expr = build2 (cond_code, boolean_type_node, e_offset, e_bound);
stmt = gimple_build_cond_empty (expr);
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
split = split_block (body_bb, stmt);
elem_body_bb = split->dest;
if (cont_bb == body_bb)
cont_bb = elem_body_bb;
body_bb = split->src;
split->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;
/* Add a dummy exit for the tiled block when cont_bb is missing. */
if (cont_bb == NULL)
{
edge e = make_edge (body_bb, exit_bb, EDGE_FALSE_VALUE);
e->probability = profile_probability::even ();
split->probability = profile_probability::even ();
}
/* Initialize the user's loop vars. */
gsi = gsi_start_bb (elem_body_bb);
expand_oacc_collapse_vars (fd, true, &gsi, counts, e_offset);
}
}
/* Loop increment goes into cont_bb. If this is not a loop, we
will have spawned threads as if it were, and each one will
execute one iteration. The specification is not explicit about
whether such constructs are ill-formed or not, and they can
occur, especially when noreturn routines are involved. */
if (cont_bb)
{
gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
loc = gimple_location (cont_stmt);
if (fd->tiling)
{
/* Insert element loop increment and test. */
expr = build2 (PLUS_EXPR, diff_type, e_offset, e_step);
expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
true, GSI_SAME_STMT);
ass = gimple_build_assign (e_offset, expr);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
expr = build2 (cond_code, boolean_type_node, e_offset, e_bound);
stmt = gimple_build_cond_empty (expr);
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
split = split_block (cont_bb, stmt);
elem_cont_bb = split->src;
cont_bb = split->dest;
split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
split->probability = profile_probability::unlikely ().guessed ();
edge latch_edge
= make_edge (elem_cont_bb, elem_body_bb, EDGE_TRUE_VALUE);
latch_edge->probability = profile_probability::likely ().guessed ();
edge skip_edge = make_edge (body_bb, cont_bb, EDGE_FALSE_VALUE);
skip_edge->probability = profile_probability::unlikely ().guessed ();
edge loop_entry_edge = EDGE_SUCC (body_bb, 1 - skip_edge->dest_idx);
loop_entry_edge->probability
= profile_probability::likely ().guessed ();
gsi = gsi_for_stmt (cont_stmt);
}
/* Increment offset. */
if (gimple_in_ssa_p (cfun))
expr = build2 (plus_code, iter_type, offset,
fold_convert (plus_type, step));
else
expr = build2 (PLUS_EXPR, diff_type, offset, step);
expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
true, GSI_SAME_STMT);
ass = gimple_build_assign (offset_incr, expr);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
expr = build2 (cond_code, boolean_type_node, offset_incr, bound);
gsi_insert_before (&gsi, gimple_build_cond_empty (expr), GSI_SAME_STMT);
/* Remove the GIMPLE_OMP_CONTINUE. */
gsi_remove (&gsi, true);
/* Fixup edges from cont_bb. */
be = BRANCH_EDGE (cont_bb);
fte = FALLTHRU_EDGE (cont_bb);
be->flags |= EDGE_TRUE_VALUE;
fte->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
if (chunking)
{
/* Split the beginning of exit_bb to make bottom_bb. We
need to insert a nop at the start, because splitting is
after a stmt, not before. */
gsi = gsi_start_bb (exit_bb);
stmt = gimple_build_nop ();
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
split = split_block (exit_bb, stmt);
bottom_bb = split->src;
exit_bb = split->dest;
gsi = gsi_last_bb (bottom_bb);
/* Chunk increment and test goes into bottom_bb. */
expr = build2 (PLUS_EXPR, diff_type, chunk_no,
build_int_cst (diff_type, 1));
ass = gimple_build_assign (chunk_no, expr);
gsi_insert_after (&gsi, ass, GSI_CONTINUE_LINKING);
/* Chunk test at end of bottom_bb. */
expr = build2 (LT_EXPR, boolean_type_node, chunk_no, chunk_max);
gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
GSI_CONTINUE_LINKING);
/* Fixup edges from bottom_bb. */
split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
split->probability = profile_probability::unlikely ().guessed ();
edge latch_edge = make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE);
latch_edge->probability = profile_probability::likely ().guessed ();
}
}
gsi = gsi_last_nondebug_bb (exit_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
loc = gimple_location (gsi_stmt (gsi));
if (!gimple_in_ssa_p (cfun))
{
/* Insert the final value of V, in case it is live. This is the
value for the only thread that survives past the join. */
expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
expr = fold_build2 (MULT_EXPR, diff_type, expr, s);
expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, expr));
expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
true, GSI_SAME_STMT);
ass = gimple_build_assign (v, expr);
gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
}
/* Remove the OMP_RETURN. */
gsi_remove (&gsi, true);
if (cont_bb)
{
/* We now have one, two or three nested loops. Update the loop
structures. */
class loop *parent = entry_bb->loop_father;
class loop *body = body_bb->loop_father;
if (chunking)
{
class loop *chunk_loop = alloc_loop ();
chunk_loop->header = head_bb;
chunk_loop->latch = bottom_bb;
add_loop (chunk_loop, parent);
parent = chunk_loop;
}
else if (parent != body)
{
gcc_assert (body->header == body_bb);
gcc_assert (body->latch == cont_bb
|| single_pred (body->latch) == cont_bb);
parent = NULL;
}
if (parent)
{
class loop *body_loop = alloc_loop ();
body_loop->header = body_bb;
body_loop->latch = cont_bb;
add_loop (body_loop, parent);
if (fd->tiling)
{
/* Insert tiling's element loop. */
class loop *inner_loop = alloc_loop ();
inner_loop->header = elem_body_bb;
inner_loop->latch = elem_cont_bb;
add_loop (inner_loop, body_loop);
}
}
}
}
/* Expand the OMP loop defined by REGION. */
static void
expand_omp_for (struct omp_region *region, gimple *inner_stmt)
{
struct omp_for_data fd;
struct omp_for_data_loop *loops;
loops = XALLOCAVEC (struct omp_for_data_loop,
gimple_omp_for_collapse (last_stmt (region->entry)));
omp_extract_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
&fd, loops);
region->sched_kind = fd.sched_kind;
region->sched_modifiers = fd.sched_modifiers;
region->has_lastprivate_conditional = fd.lastprivate_conditional != 0;
if (fd.non_rect && !gimple_omp_for_combined_into_p (fd.for_stmt))
{
for (int i = fd.first_nonrect; i <= fd.last_nonrect; i++)
if ((loops[i].m1 || loops[i].m2)
&& (loops[i].m1 == NULL_TREE
|| TREE_CODE (loops[i].m1) == INTEGER_CST)
&& (loops[i].m2 == NULL_TREE
|| TREE_CODE (loops[i].m2) == INTEGER_CST)
&& TREE_CODE (loops[i].step) == INTEGER_CST
&& TREE_CODE (loops[i - loops[i].outer].step) == INTEGER_CST)
{
tree t;
tree itype = TREE_TYPE (loops[i].v);
if (loops[i].m1 && loops[i].m2)
t = fold_build2 (MINUS_EXPR, itype, loops[i].m2, loops[i].m1);
else if (loops[i].m1)
t = fold_build1 (NEGATE_EXPR, itype, loops[i].m1);
else
t = loops[i].m2;
t = fold_build2 (MULT_EXPR, itype, t,
fold_convert (itype,
loops[i - loops[i].outer].step));
if (TYPE_UNSIGNED (itype) && loops[i].cond_code == GT_EXPR)
t = fold_build2 (TRUNC_MOD_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
fold_build1 (NEGATE_EXPR, itype,
fold_convert (itype,
loops[i].step)));
else
t = fold_build2 (TRUNC_MOD_EXPR, itype, t,
fold_convert (itype, loops[i].step));
if (integer_nonzerop (t))
error_at (gimple_location (fd.for_stmt),
"invalid OpenMP non-rectangular loop step; "
"%<(%E - %E) * %E%> is not a multiple of loop %d "
"step %qE",
loops[i].m2 ? loops[i].m2 : integer_zero_node,
loops[i].m1 ? loops[i].m1 : integer_zero_node,
loops[i - loops[i].outer].step, i + 1,
loops[i].step);
}
}
gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
if (region->cont)
{
gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
}
else
/* If there isn't a continue then this is a degenerate case where
the introduction of abnormal edges during lowering will prevent
original loops from being detected. Fix that up. */
loops_state_set (LOOPS_NEED_FIXUP);
if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
expand_omp_simd (region, &fd);
else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
{
gcc_assert (!inner_stmt && !fd.non_rect);
expand_oacc_for (region, &fd);
}
else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP)
{
if (gimple_omp_for_combined_into_p (fd.for_stmt))
expand_omp_taskloop_for_inner (region, &fd, inner_stmt);
else
expand_omp_taskloop_for_outer (region, &fd, inner_stmt);
}
else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
&& !fd.have_ordered)
{
if (fd.chunk_size == NULL)
expand_omp_for_static_nochunk (region, &fd, inner_stmt);
else
expand_omp_for_static_chunk (region, &fd, inner_stmt);
}
else
{
int fn_index, start_ix, next_ix;
unsigned HOST_WIDE_INT sched = 0;
tree sched_arg = NULL_TREE;
gcc_assert (gimple_omp_for_kind (fd.for_stmt)
== GF_OMP_FOR_KIND_FOR && !fd.non_rect);
if (fd.chunk_size == NULL
&& fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
fd.chunk_size = integer_zero_node;
switch (fd.sched_kind)
{
case OMP_CLAUSE_SCHEDULE_RUNTIME:
if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) != 0
&& fd.lastprivate_conditional == 0)
{
gcc_assert (!fd.have_ordered);
fn_index = 6;
sched = 4;
}
else if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0
&& !fd.have_ordered
&& fd.lastprivate_conditional == 0)
fn_index = 7;
else
{
fn_index = 3;
sched = (HOST_WIDE_INT_1U << 31);
}
break;
case OMP_CLAUSE_SCHEDULE_DYNAMIC:
case OMP_CLAUSE_SCHEDULE_GUIDED:
if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_MONOTONIC) == 0
&& !fd.have_ordered
&& fd.lastprivate_conditional == 0)
{
fn_index = 3 + fd.sched_kind;
sched = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_GUIDED) + 2;
break;
}
fn_index = fd.sched_kind;
sched = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_GUIDED) + 2;
sched += (HOST_WIDE_INT_1U << 31);
break;
case OMP_CLAUSE_SCHEDULE_STATIC:
gcc_assert (fd.have_ordered);
fn_index = 0;
sched = (HOST_WIDE_INT_1U << 31) + 1;
break;
default:
gcc_unreachable ();
}
if (!fd.ordered)
fn_index += fd.have_ordered * 8;
if (fd.ordered)
start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index;
else
start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
if (fd.have_reductemp || fd.have_pointer_condtemp)
{
if (fd.ordered)
start_ix = (int)BUILT_IN_GOMP_LOOP_DOACROSS_START;
else if (fd.have_ordered)
start_ix = (int)BUILT_IN_GOMP_LOOP_ORDERED_START;
else
start_ix = (int)BUILT_IN_GOMP_LOOP_START;
sched_arg = build_int_cstu (long_integer_type_node, sched);
if (!fd.chunk_size)
fd.chunk_size = integer_zero_node;
}
if (fd.iter_type == long_long_unsigned_type_node)
{
start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
- (int)BUILT_IN_GOMP_LOOP_STATIC_START);
next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
- (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
}
expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
(enum built_in_function) next_ix, sched_arg,
inner_stmt);
}
if (gimple_in_ssa_p (cfun))
update_ssa (TODO_update_ssa_only_virtuals);
}
/* Expand code for an OpenMP sections directive. In pseudo code, we generate
v = GOMP_sections_start (n);
L0:
switch (v)
{
case 0:
goto L2;
case 1:
section 1;
goto L1;
case 2:
...
case n:
...
default:
abort ();
}
L1:
v = GOMP_sections_next ();
goto L0;
L2:
reduction;
If this is a combined parallel sections construct, replace the call to
GOMP_sections_start with a call to GOMP_sections_next. */
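/* For example (illustrative user code):

       #pragma omp sections
       {
         #pragma omp section
           a ();
         #pragma omp section
           b ();
       }

   produces the switch above with n = 2: case 1 runs a (), case 2 runs
   b (), case 0 jumps to L2, and each thread loops back through
   GOMP_sections_next until no unhandled section remains. */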
static void
expand_omp_sections (struct omp_region *region)
{
tree t, u, vin = NULL, vmain, vnext, l2;
unsigned len;
basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
gimple_stmt_iterator si, switch_si;
gomp_sections *sections_stmt;
gimple *stmt;
gomp_continue *cont;
edge_iterator ei;
edge e;
struct omp_region *inner;
unsigned i, casei;
bool exit_reachable = region->cont != NULL;
gcc_assert (region->exit != NULL);
entry_bb = region->entry;
l0_bb = single_succ (entry_bb);
l1_bb = region->cont;
l2_bb = region->exit;
if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
l2 = gimple_block_label (l2_bb);
else
{
/* This can happen if there are reductions. */
len = EDGE_COUNT (l0_bb->succs);
gcc_assert (len > 0);
e = EDGE_SUCC (l0_bb, len - 1);
si = gsi_last_nondebug_bb (e->dest);
l2 = NULL_TREE;
if (gsi_end_p (si)
|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
l2 = gimple_block_label (e->dest);
else
FOR_EACH_EDGE (e, ei, l0_bb->succs)
{
si = gsi_last_nondebug_bb (e->dest);
if (gsi_end_p (si)
|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
{
l2 = gimple_block_label (e->dest);
break;
}
}
}
if (exit_reachable)
default_bb = create_empty_bb (l1_bb->prev_bb);
else
default_bb = create_empty_bb (l0_bb);
/* We will build a switch() with enough cases for all the
GIMPLE_OMP_SECTION regions, a '0' case to handle there being no more
work, and a default case to abort if something goes wrong. */
len = EDGE_COUNT (l0_bb->succs);
/* Use vec::quick_push on label_vec throughout, since we know the size
in advance. */
auto_vec<tree> label_vec (len);
/* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
GIMPLE_OMP_SECTIONS statement. */
si = gsi_last_nondebug_bb (entry_bb);
sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
vin = gimple_omp_sections_control (sections_stmt);
tree clauses = gimple_omp_sections_clauses (sections_stmt);
tree reductmp = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
tree condtmp = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_);
tree cond_var = NULL_TREE;
if (reductmp || condtmp)
{
tree reductions = null_pointer_node, mem = null_pointer_node;
tree memv = NULL_TREE, condtemp = NULL_TREE;
gimple_stmt_iterator gsi = gsi_none ();
gimple *g = NULL;
if (reductmp)
{
reductions = OMP_CLAUSE_DECL (reductmp);
gcc_assert (TREE_CODE (reductions) == SSA_NAME);
g = SSA_NAME_DEF_STMT (reductions);
reductions = gimple_assign_rhs1 (g);
OMP_CLAUSE_DECL (reductmp) = reductions;
gsi = gsi_for_stmt (g);
}
else
gsi = si;
if (condtmp)
{
condtemp = OMP_CLAUSE_DECL (condtmp);
tree c = omp_find_clause (OMP_CLAUSE_CHAIN (condtmp),
OMP_CLAUSE__CONDTEMP_);
cond_var = OMP_CLAUSE_DECL (c);
tree type = TREE_TYPE (condtemp);
memv = create_tmp_var (type);
TREE_ADDRESSABLE (memv) = 1;
unsigned cnt = 0;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
++cnt;
unsigned HOST_WIDE_INT sz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))) * cnt;
expand_omp_build_assign (&gsi, memv, build_int_cst (type, sz),
false);
mem = build_fold_addr_expr (memv);
}
t = build_int_cst (unsigned_type_node, len - 1);
u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS2_START);
stmt = gimple_build_call (u, 3, t, reductions, mem);
gimple_call_set_lhs (stmt, vin);
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
if (condtmp)
{
expand_omp_build_assign (&gsi, condtemp, memv, false);
tree t = build2 (PLUS_EXPR, TREE_TYPE (cond_var),
vin, build_one_cst (TREE_TYPE (cond_var)));
expand_omp_build_assign (&gsi, cond_var, t, false);
}
if (reductmp)
{
gsi_remove (&gsi, true);
release_ssa_name (gimple_assign_lhs (g));
}
}
else if (!is_combined_parallel (region))
{
/* If we are not inside a combined parallel+sections region,
call GOMP_sections_start. */
t = build_int_cst (unsigned_type_node, len - 1);
u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
stmt = gimple_build_call (u, 1, t);
}
else
{
/* Otherwise, call GOMP_sections_next. */
u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
stmt = gimple_build_call (u, 0);
}
if (!reductmp && !condtmp)
{
gimple_call_set_lhs (stmt, vin);
gsi_insert_after (&si, stmt, GSI_SAME_STMT);
}
gsi_remove (&si, true);
/* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
L0_BB. */
switch_si = gsi_last_nondebug_bb (l0_bb);
gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
if (exit_reachable)
{
cont = as_a <gomp_continue *> (last_stmt (l1_bb));
gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
vmain = gimple_omp_continue_control_use (cont);
vnext = gimple_omp_continue_control_def (cont);
}
else
{
vmain = vin;
vnext = NULL_TREE;
}
t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
label_vec.quick_push (t);
i = 1;
/* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
for (inner = region->inner, casei = 1;
inner;
inner = inner->next, i++, casei++)
{
basic_block s_entry_bb, s_exit_bb;
/* Skip optional reduction region. */
if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
{
--i;
--casei;
continue;
}
s_entry_bb = inner->entry;
s_exit_bb = inner->exit;
t = gimple_block_label (s_entry_bb);
u = build_int_cst (unsigned_type_node, casei);
u = build_case_label (u, NULL, t);
label_vec.quick_push (u);
si = gsi_last_nondebug_bb (s_entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
gsi_remove (&si, true);
single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
if (s_exit_bb == NULL)
continue;
si = gsi_last_nondebug_bb (s_exit_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
gsi_remove (&si, true);
single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
}
/* Error handling code goes in DEFAULT_BB. */
t = gimple_block_label (default_bb);
u = build_case_label (NULL, NULL, t);
make_edge (l0_bb, default_bb, 0);
add_bb_to_loop (default_bb, current_loops->tree_root);
stmt = gimple_build_switch (vmain, u, label_vec);
gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
gsi_remove (&switch_si, true);
si = gsi_start_bb (default_bb);
stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
if (exit_reachable)
{
tree bfn_decl;
/* Code to get the next section goes in L1_BB. */
si = gsi_last_nondebug_bb (l1_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
stmt = gimple_build_call (bfn_decl, 0);
gimple_call_set_lhs (stmt, vnext);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
if (cond_var)
{
tree t = build2 (PLUS_EXPR, TREE_TYPE (cond_var),
vnext, build_one_cst (TREE_TYPE (cond_var)));
expand_omp_build_assign (&si, cond_var, t, false);
}
gsi_remove (&si, true);
single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
}
/* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
si = gsi_last_nondebug_bb (l2_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (si)))
t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
else if (gimple_omp_return_lhs (gsi_stmt (si)))
t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
else
t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
stmt = gimple_build_call (t, 0);
if (gimple_omp_return_lhs (gsi_stmt (si)))
gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
gsi_insert_after (&si, stmt, GSI_SAME_STMT);
gsi_remove (&si, true);
set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
/* Expand code for an OpenMP single directive. We've already expanded
much of the code; here we simply place the GOMP_barrier call. */
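/* E.g. "#pragma omp single nowait" simply loses both region markers;
   without nowait the exit marker is replaced by the implicit
   GOMP_barrier call, passing along the GIMPLE_OMP_RETURN lhs if one
   is present. */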
static void
expand_omp_single (struct omp_region *region)
{
basic_block entry_bb, exit_bb;
gimple_stmt_iterator si;
entry_bb = region->entry;
exit_bb = region->exit;
si = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
gsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
si = gsi_last_nondebug_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
{
tree t = gimple_omp_return_lhs (gsi_stmt (si));
gsi_insert_after (&si, omp_build_barrier (t), GSI_SAME_STMT);
}
gsi_remove (&si, true);
single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
taskgroup, ordered and critical. All we need to do here is remove the
entry and exit markers for REGION. */
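/* E.g. a "#pragma omp critical" body keeps the runtime calls inserted
   during lowering; all that is left to do here is drop the
   GIMPLE_OMP_CRITICAL and GIMPLE_OMP_RETURN markers around it. The one
   exception below is a host teams construct, which is expanded like a
   parallel region instead. */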
static void
expand_omp_synch (struct omp_region *region)
{
basic_block entry_bb, exit_bb;
gimple_stmt_iterator si;
entry_bb = region->entry;
exit_bb = region->exit;
si = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
if (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS
&& gimple_omp_teams_host (as_a <gomp_teams *> (gsi_stmt (si))))
{
expand_omp_taskreg (region);
return;
}
gsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
if (exit_bb)
{
si = gsi_last_nondebug_bb (exit_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
gsi_remove (&si, true);
single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
}
/* Translate enum omp_memory_order to enum memmodel. The two enums
use different numbering so that OMP_MEMORY_ORDER_UNSPECIFIED
can be 0. */
static enum memmodel
omp_memory_order_to_memmodel (enum omp_memory_order mo)
{
switch (mo)
{
case OMP_MEMORY_ORDER_RELAXED: return MEMMODEL_RELAXED;
case OMP_MEMORY_ORDER_ACQUIRE: return MEMMODEL_ACQUIRE;
case OMP_MEMORY_ORDER_RELEASE: return MEMMODEL_RELEASE;
case OMP_MEMORY_ORDER_ACQ_REL: return MEMMODEL_ACQ_REL;
case OMP_MEMORY_ORDER_SEQ_CST: return MEMMODEL_SEQ_CST;
default: gcc_unreachable ();
}
}
/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
operation as a normal volatile load. */
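/* E.g. (a sketch, assuming the target provides the builtin): for a
   4-byte LOADED_VAL (INDEX == 2),

       #pragma omp atomic read
       v = *addr;

   is emitted as  v = __atomic_load_4 (addr, mo);  where MO comes from
   the directive's memory-order clause. */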
static bool
expand_omp_atomic_load (basic_block load_bb, tree addr,
tree loaded_val, int index)
{
enum built_in_function tmpbase;
gimple_stmt_iterator gsi;
basic_block store_bb;
location_t loc;
gimple *stmt;
tree decl, call, type, itype;
gsi = gsi_last_nondebug_bb (load_bb);
stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
loc = gimple_location (stmt);
/* ??? If the target does not implement atomic_load_optab[mode], and mode
is smaller than word size, then expand_atomic_load assumes that the load
is atomic. We could avoid the builtin entirely in this case. */
tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
decl = builtin_decl_explicit (tmpbase);
if (decl == NULL_TREE)
return false;
type = TREE_TYPE (loaded_val);
itype = TREE_TYPE (TREE_TYPE (decl));
enum omp_memory_order omo = gimple_omp_atomic_memory_order (stmt);
tree mo = build_int_cst (NULL, omp_memory_order_to_memmodel (omo));
call = build_call_expr_loc (loc, decl, 2, addr, mo);
if (!useless_type_conversion_p (type, itype))
call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&gsi, true);
store_bb = single_succ (load_bb);
gsi = gsi_last_nondebug_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
gsi_remove (&gsi, true);
if (gimple_in_ssa_p (cfun))
update_ssa (TODO_update_ssa_no_phi);
return true;
}
/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
operation as a normal volatile store. */
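/* E.g. (illustrative, assuming the builtin exists): a 4-byte

       #pragma omp atomic write
       *addr = x;

   becomes  __atomic_store_4 (addr, x, mo);  if the old value is also
   consumed (an atomic capture), the call is __atomic_exchange_4 and
   its result is assigned to LOADED_VAL instead. */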
static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
tree loaded_val, tree stored_val, int index)
{
enum built_in_function tmpbase;
gimple_stmt_iterator gsi;
basic_block store_bb = single_succ (load_bb);
location_t loc;
gimple *stmt;
tree decl, call, type, itype;
machine_mode imode;
bool exchange;
gsi = gsi_last_nondebug_bb (load_bb);
stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
/* If the load value is needed, then this isn't a store but an exchange. */
exchange = gimple_omp_atomic_need_value_p (stmt);
gsi = gsi_last_nondebug_bb (store_bb);
stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
loc = gimple_location (stmt);
/* ??? If the target does not implement atomic_store_optab[mode], and mode
is smaller than word size, then expand_atomic_store assumes that the store
is atomic. We could avoid the builtin entirely in this case. */
tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
decl = builtin_decl_explicit (tmpbase);
if (decl == NULL_TREE)
return false;
type = TREE_TYPE (stored_val);
/* Dig out the type of the function's second argument. */
itype = TREE_TYPE (decl);
itype = TYPE_ARG_TYPES (itype);
itype = TREE_CHAIN (itype);
itype = TREE_VALUE (itype);
imode = TYPE_MODE (itype);
if (exchange && !can_atomic_exchange_p (imode, true))
return false;
if (!useless_type_conversion_p (itype, type))
stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
enum omp_memory_order omo = gimple_omp_atomic_memory_order (stmt);
tree mo = build_int_cst (NULL, omp_memory_order_to_memmodel (omo));
call = build_call_expr_loc (loc, decl, 3, addr, stored_val, mo);
if (exchange)
{
if (!useless_type_conversion_p (type, itype))
call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
}
force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&gsi, true);
/* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
gsi = gsi_last_nondebug_bb (load_bb);
gsi_remove (&gsi, true);
if (gimple_in_ssa_p (cfun))
update_ssa (TODO_update_ssa_no_phi);
return true;
}
/* A subroutine of expand_omp_atomic. Attempt to implement the atomic
operation as a __atomic_fetch_op builtin. INDEX is log2 of the
size of the data type, and thus usable to find the index of the builtin
decl. Returns false if the expression is not of the proper form. */
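/* A worked example (hedged; the builtin must exist for the mode):

       #pragma omp atomic
       x += 3;

   with a 4-byte X matches the PLUS_EXPR case and becomes

       __atomic_fetch_add_4 (&x, 3, MEMMODEL_RELAXED);

   (or __atomic_add_fetch_4 when the updated value is needed). */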
static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
tree addr, tree loaded_val,
tree stored_val, int index)
{
enum built_in_function oldbase, newbase, tmpbase;
tree decl, itype, call;
tree lhs, rhs;
basic_block store_bb = single_succ (load_bb);
gimple_stmt_iterator gsi;
gimple *stmt;
location_t loc;
enum tree_code code;
bool need_old, need_new;
machine_mode imode;
/* We expect to find the following sequences:
load_bb:
GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
store_bb:
val = tmp OP something; (or: something OP tmp)
GIMPLE_OMP_STORE (val)
???FIXME: Allow a more flexible sequence.
Perhaps use data flow to pick the statements.
*/
gsi = gsi_after_labels (store_bb);
stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
{
gsi_next_nondebug (&gsi);
if (gsi_end_p (gsi))
return false;
stmt = gsi_stmt (gsi);
}
loc = gimple_location (stmt);
if (!is_gimple_assign (stmt))
return false;
gsi_next_nondebug (&gsi);
if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
return false;
need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
enum omp_memory_order omo
= gimple_omp_atomic_memory_order (last_stmt (load_bb));
enum memmodel mo = omp_memory_order_to_memmodel (omo);
gcc_checking_assert (!need_old || !need_new);
if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
return false;
/* Check for one of the supported fetch-op operations. */
code = gimple_assign_rhs_code (stmt);
switch (code)
{
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
break;
case MINUS_EXPR:
oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
break;
case BIT_AND_EXPR:
oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
break;
case BIT_IOR_EXPR:
oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
break;
case BIT_XOR_EXPR:
oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
break;
default:
return false;
}
/* Make sure the expression is of the proper form. */
if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
rhs = gimple_assign_rhs2 (stmt);
else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
&& operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
rhs = gimple_assign_rhs1 (stmt);
else
return false;
tmpbase = ((enum built_in_function)
((need_new ? newbase : oldbase) + index + 1));
decl = builtin_decl_explicit (tmpbase);
if (decl == NULL_TREE)
return false;
itype = TREE_TYPE (TREE_TYPE (decl));
imode = TYPE_MODE (itype);
/* We could test all of the various optabs involved, but the fact of the
matter is that (with the exception of i486 vs i586 and xadd) all targets
that support any atomic operation optab also implement compare-and-swap.
Let optabs.c take care of expanding any compare-and-swap loop. */
if (!can_compare_and_swap_p (imode, true) || !can_atomic_load_p (imode))
return false;
gsi = gsi_last_nondebug_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
/* OpenMP does not imply any barrier-like semantics on its atomic ops.
It only requires that the operation happen atomically. Thus we can
use the RELAXED memory model. */
call = build_call_expr_loc (loc, decl, 3, addr,
fold_convert_loc (loc, itype, rhs),
build_int_cst (NULL, mo));
if (need_old || need_new)
{
lhs = need_old ? loaded_val : stored_val;
call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
}
else
call = fold_convert_loc (loc, void_type_node, call);
force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&gsi, true);
gsi = gsi_last_nondebug_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
gsi_remove (&gsi, true);
gsi = gsi_last_nondebug_bb (store_bb);
stmt = gsi_stmt (gsi);
gsi_remove (&gsi, true);
if (gimple_in_ssa_p (cfun))
{
release_defs (stmt);
update_ssa (TODO_update_ssa_no_phi);
}
return true;
}
/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
oldval = *addr;
repeat:
newval = rhs; // with oldval replacing *addr in rhs
oldval = __sync_val_compare_and_swap (addr, oldval, newval);
if (oldval != newval)
goto repeat;
INDEX is log2 of the size of the data type, and thus usable to find the
index of the builtin decl. */
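/* For instance (illustrative): a 4-byte float update

       #pragma omp atomic
       f += 1.0f;

   has no fetch-op builtin, so the loop above is built around
   __sync_val_compare_and_swap_4 operating on a VIEW_CONVERT_EXPRed
   integer image of F; doing the retry comparison on the integer image
   is also what keeps the loop correct for NaNs and -0.0. */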
static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
tree addr, tree loaded_val, tree stored_val,
int index)
{
tree loadedi, storedi, initial, new_storedi, old_vali;
tree type, itype, cmpxchg, iaddr, atype;
gimple_stmt_iterator si;
basic_block loop_header = single_succ (load_bb);
gimple *phi, *stmt;
edge e;
enum built_in_function fncode;
/* ??? We need a non-pointer interface to __atomic_compare_exchange in
order to use the RELAXED memory model effectively. */
fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
+ index + 1);
cmpxchg = builtin_decl_explicit (fncode);
if (cmpxchg == NULL_TREE)
return false;
type = TYPE_MAIN_VARIANT (TREE_TYPE (loaded_val));
atype = type;
itype = TREE_TYPE (TREE_TYPE (cmpxchg));
if (!can_compare_and_swap_p (TYPE_MODE (itype), true)
|| !can_atomic_load_p (TYPE_MODE (itype)))
return false;
/* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
si = gsi_last_nondebug_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
/* For floating-point values, we'll need to view-convert them to integers
so that we can perform the atomic compare and swap. Simplify the
following code by always setting up the "i"ntegral variables. */
if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
{
tree iaddr_val;
iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
true));
atype = itype;
iaddr_val
= force_gimple_operand_gsi (&si,
fold_convert (TREE_TYPE (iaddr), addr),
false, NULL_TREE, true, GSI_SAME_STMT);
stmt = gimple_build_assign (iaddr, iaddr_val);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
loadedi = create_tmp_var (itype);
if (gimple_in_ssa_p (cfun))
loadedi = make_ssa_name (loadedi);
}
else
{
iaddr = addr;
loadedi = loaded_val;
}
fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
tree loaddecl = builtin_decl_explicit (fncode);
if (loaddecl)
initial
= fold_convert (atype,
build_call_expr (loaddecl, 2, iaddr,
build_int_cst (NULL_TREE,
MEMMODEL_RELAXED)));
else
{
tree off
= build_int_cst (build_pointer_type_for_mode (atype, ptr_mode,
true), 0);
initial = build2 (MEM_REF, atype, iaddr, off);
}
initial
= force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
GSI_SAME_STMT);
/* Move the value to the LOADEDI temporary. */
if (gimple_in_ssa_p (cfun))
{
gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
phi = create_phi_node (loadedi, loop_header);
SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
initial);
}
else
gsi_insert_before (&si,
gimple_build_assign (loadedi, initial),
GSI_SAME_STMT);
if (loadedi != loaded_val)
{
gimple_stmt_iterator gsi2;
tree x;
x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
gsi2 = gsi_start_bb (loop_header);
if (gimple_in_ssa_p (cfun))
{
gassign *stmt;
x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
true, GSI_SAME_STMT);
stmt = gimple_build_assign (loaded_val, x);
gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
}
else
{
x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
true, GSI_SAME_STMT);
}
}
gsi_remove (&si, true);
si = gsi_last_nondebug_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
if (iaddr == addr)
storedi = stored_val;
else
storedi
= force_gimple_operand_gsi (&si,
build1 (VIEW_CONVERT_EXPR, itype,
stored_val), true, NULL_TREE, true,
GSI_SAME_STMT);
/* Build the compare&swap statement. */
new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
new_storedi = force_gimple_operand_gsi (&si,
fold_convert (TREE_TYPE (loadedi),
new_storedi),
true, NULL_TREE,
true, GSI_SAME_STMT);
if (gimple_in_ssa_p (cfun))
old_vali = loadedi;
else
{
old_vali = create_tmp_var (TREE_TYPE (loadedi));
stmt = gimple_build_assign (old_vali, loadedi);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
stmt = gimple_build_assign (loadedi, new_storedi);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
}
/* Note that we always perform the comparison as an integer, even for
floating point. This allows the atomic operation to properly
succeed even with NaNs and -0.0. */
tree ne = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
stmt = gimple_build_cond_empty (ne);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
/* Update cfg. */
e = single_succ_edge (store_bb);
e->flags &= ~EDGE_FALLTHRU;
e->flags |= EDGE_FALSE_VALUE;
/* Expect no looping. */
e->probability = profile_probability::guessed_always ();
e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
e->probability = profile_probability::guessed_never ();
/* Copy the new value to loadedi (we already did that before the condition
if we are not in SSA). */
if (gimple_in_ssa_p (cfun))
{
phi = gimple_seq_first_stmt (phi_nodes (loop_header));
SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
}
/* Remove GIMPLE_OMP_ATOMIC_STORE. */
gsi_remove (&si, true);
class loop *loop = alloc_loop ();
loop->header = loop_header;
loop->latch = store_bb;
add_loop (loop, loop_header->loop_father);
if (gimple_in_ssa_p (cfun))
update_ssa (TODO_update_ssa_no_phi);
return true;
}
/* A subroutine of expand_omp_atomic. Implement the atomic operation as:
GOMP_atomic_start ();
*addr = rhs;
GOMP_atomic_end ();
The result is not globally atomic, but works so long as all parallel
references are within #pragma omp atomic directives. According to
responses received from omp@openmp.org, appears to be within spec.
Which makes sense, since that's how several other compilers handle
this situation as well.
LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
expanding. STORED_VAL is the operand of the matching
GIMPLE_OMP_ATOMIC_STORE.
We replace
GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
loaded_val = *addr;
and replace
GIMPLE_OMP_ATOMIC_STORE (stored_val) with
*addr = stored_val;
*/
static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
tree addr, tree loaded_val, tree stored_val)
{
gimple_stmt_iterator si;
gassign *stmt;
tree t;
si = gsi_last_nondebug_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
t = build_call_expr (t, 0);
force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
tree mem = build_simple_mem_ref (addr);
TREE_TYPE (mem) = TREE_TYPE (loaded_val);
TREE_OPERAND (mem, 1)
= fold_convert (build_pointer_type_for_mode (TREE_TYPE (mem), ptr_mode,
true),
TREE_OPERAND (mem, 1));
stmt = gimple_build_assign (loaded_val, mem);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
gsi_remove (&si, true);
si = gsi_last_nondebug_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
stmt = gimple_build_assign (unshare_expr (mem), stored_val);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
t = build_call_expr (t, 0);
force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&si, true);
if (gimple_in_ssa_p (cfun))
update_ssa (TODO_update_ssa_no_phi);
return true;
}
/* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
using expand_omp_atomic_fetch_op. If it failed, we try to
call expand_omp_atomic_pipeline, and if it fails too, the
ultimate fallback is wrapping the operation in a mutex
(expand_omp_atomic_mutex). REGION is the atomic region built
by build_omp_regions_1(). */
static void
expand_omp_atomic (struct omp_region *region)
{
basic_block load_bb = region->entry, store_bb = region->exit;
gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
tree loaded_val = gimple_omp_atomic_load_lhs (load);
tree addr = gimple_omp_atomic_load_rhs (load);
tree stored_val = gimple_omp_atomic_store_val (store);
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (loaded_val));
HOST_WIDE_INT index;
/* Make sure the type is one of the supported sizes. */
index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
index = exact_log2 (index);
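/* INDEX is now the log2 of the operand size, i.e. 0..4 for the 1, 2,
   4, 8 and 16 byte operands the __atomic/__sync builtins cover. */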
if (index >= 0 && index <= 4)
{
unsigned int align = TYPE_ALIGN_UNIT (type);
/* __sync builtins require strict data alignment. */
if (exact_log2 (align) >= index)
{
/* Atomic load. */
scalar_mode smode;
if (loaded_val == stored_val
&& (is_int_mode (TYPE_MODE (type), &smode)
|| is_float_mode (TYPE_MODE (type), &smode))
&& GET_MODE_BITSIZE (smode) <= BITS_PER_WORD
&& expand_omp_atomic_load (load_bb, addr, loaded_val, index))
return;
/* Atomic store. */
if ((is_int_mode (TYPE_MODE (type), &smode)
|| is_float_mode (TYPE_MODE (type), &smode))
&& GET_MODE_BITSIZE (smode) <= BITS_PER_WORD
&& store_bb == single_succ (load_bb)
&& first_stmt (store_bb) == store
&& expand_omp_atomic_store (load_bb, addr, loaded_val,
stored_val, index))
return;
/* When possible, use specialized atomic update functions. */
if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
&& store_bb == single_succ (load_bb)
&& expand_omp_atomic_fetch_op (load_bb, addr,
loaded_val, stored_val, index))
return;
/* If we don't have specialized __sync builtins, try and implement
as a compare and swap loop. */
if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
loaded_val, stored_val, index))
return;
}
}
/* The ultimate fallback is wrapping the operation in a mutex. */
expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
/* Mark the loops inside the kernels region starting at REGION_ENTRY and ending
at REGION_EXIT. */
static void
mark_loops_in_oacc_kernels_region (basic_block region_entry,
basic_block region_exit)
{
class loop *outer = region_entry->loop_father;
gcc_assert (region_exit == NULL || outer == region_exit->loop_father);
/* Don't parallelize the kernels region if it contains more than one outer
loop. */
unsigned int nr_outer_loops = 0;
class loop *single_outer = NULL;
for (class loop *loop = outer->inner; loop != NULL; loop = loop->next)
{
gcc_assert (loop_outer (loop) == outer);
if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry))
continue;
if (region_exit != NULL
&& dominated_by_p (CDI_DOMINATORS, loop->header, region_exit))
continue;
nr_outer_loops++;
single_outer = loop;
}
if (nr_outer_loops != 1)
return;
for (class loop *loop = single_outer->inner;
loop != NULL;
loop = loop->inner)
if (loop->next)
return;
/* Mark the loops in the region. */
for (class loop *loop = single_outer; loop != NULL; loop = loop->inner)
loop->in_oacc_kernels_region = true;
}
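/* Illustrative sketch, not part of the original source: a kernels region
   with a single top-level loop nest, e.g.

       #pragma acc kernels
       for (int i = 0; i < n; i++)
         a[i] = 0.0;

   has its loop nest marked via in_oacc_kernels_region above, whereas a
   region containing two sibling loops (at any depth) is left unmarked
   and is not considered for parallelization here.  */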
/* Build target argument identifier from the DEVICE identifier, value
identifier ID and whether the element also has a SUBSEQUENT_PARAM. */
static tree
get_target_argument_identifier_1 (int device, bool subsequent_param, int id)
{
tree t = build_int_cst (integer_type_node, device);
if (subsequent_param)
t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
build_int_cst (integer_type_node,
GOMP_TARGET_ARG_SUBSEQUENT_PARAM));
t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
build_int_cst (integer_type_node, id));
return t;
}
/* Like above, but return it in a type that can be directly stored as an
element of the argument array.  */
static tree
get_target_argument_identifier (int device, bool subsequent_param, int id)
{
tree t = get_target_argument_identifier_1 (device, subsequent_param, id);
return fold_convert (ptr_type_node, t);
}
/* Return a target argument consisting of DEVICE identifier, value identifier
ID, and the actual VALUE. */
static tree
get_target_argument_value (gimple_stmt_iterator *gsi, int device, int id,
tree value)
{
tree t = fold_build2 (LSHIFT_EXPR, integer_type_node,
fold_convert (integer_type_node, value),
build_int_cst (unsigned_type_node,
GOMP_TARGET_ARG_VALUE_SHIFT));
t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t,
get_target_argument_identifier_1 (device, false, id));
t = fold_convert (ptr_type_node, t);
return force_gimple_operand_gsi (gsi, t, true, NULL, true, GSI_SAME_STMT);
}
/* If VALUE is an integer constant greater than -2^15 and smaller than 2^15,
push one argument to ARGS with the DEVICE, ID and VALUE all embedded in it;
otherwise push an identifier (with DEVICE and ID) and the VALUE in two
separate arguments.  */
static void
push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device,
int id, tree value, vec <tree> *args)
{
if (tree_fits_shwi_p (value)
&& tree_to_shwi (value) > -(1 << 15)
&& tree_to_shwi (value) < (1 << 15))
args->quick_push (get_target_argument_value (gsi, device, id, value));
else
{
args->quick_push (get_target_argument_identifier (device, true, id));
value = fold_convert (ptr_type_node, value);
value = force_gimple_operand_gsi (gsi, value, true, NULL, true,
GSI_SAME_STMT);
args->quick_push (value);
}
}
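/* Illustrative worked example, an assumption rather than quoted GCC
   output: a small constant such as num_teams = 4 fits the shwi range
   test above and is packed into one pointer-sized element,

       (4 << GOMP_TARGET_ARG_VALUE_SHIFT)
         | GOMP_TARGET_ARG_DEVICE_ALL | GOMP_TARGET_ARG_NUM_TEAMS

   while a run-time expression is pushed as two elements: the identifier
   with GOMP_TARGET_ARG_SUBSEQUENT_PARAM set, then the value converted
   to ptr_type_node.  */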
/* Create an array of arguments that is then passed to GOMP_target. */
static tree
get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt)
{
auto_vec <tree, 6> args;
tree clauses = gimple_omp_target_clauses (tgt_stmt);
tree t, c = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
if (c)
t = OMP_CLAUSE_NUM_TEAMS_EXPR (c);
else
t = integer_minus_one_node;
push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
GOMP_TARGET_ARG_NUM_TEAMS, t, &args);
c = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
if (c)
t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c);
else
t = integer_minus_one_node;
push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL,
GOMP_TARGET_ARG_THREAD_LIMIT, t,
&args);
/* Produce more, perhaps device specific, arguments here. */
tree argarray = create_tmp_var (build_array_type_nelts (ptr_type_node,
args.length () + 1),
".omp_target_args");
for (unsigned i = 0; i < args.length (); i++)
{
tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
build_int_cst (integer_type_node, i),
NULL_TREE, NULL_TREE);
gsi_insert_before (gsi, gimple_build_assign (ref, args[i]),
GSI_SAME_STMT);
}
tree ref = build4 (ARRAY_REF, ptr_type_node, argarray,
build_int_cst (integer_type_node, args.length ()),
NULL_TREE, NULL_TREE);
gsi_insert_before (gsi, gimple_build_assign (ref, null_pointer_node),
GSI_SAME_STMT);
TREE_ADDRESSABLE (argarray) = 1;
return build_fold_addr_expr (argarray);
}
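/* Illustrative layout, a sketch of the array built above for a target
   construct without num_teams/thread_limit clauses:

       .omp_target_args[0] = packed (DEVICE_ALL, NUM_TEAMS, -1)
       .omp_target_args[1] = packed (DEVICE_ALL, THREAD_LIMIT, -1)
       .omp_target_args[2] = NULL      <- terminator

   The trailing NULL lets the runtime scan the array without a separate
   length argument.  */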
/* Expand the GIMPLE_OMP_TARGET starting at REGION. */
static void
expand_omp_target (struct omp_region *region)
{
basic_block entry_bb, exit_bb, new_bb;
struct function *child_cfun;
tree child_fn, block, t;
gimple_stmt_iterator gsi;
gomp_target *entry_stmt;
gimple *stmt;
edge e;
bool offloaded, data_region;
int target_kind;
entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
target_kind = gimple_omp_target_kind (entry_stmt);
new_bb = region->entry;
offloaded = is_gimple_omp_offloaded (entry_stmt);
switch (target_kind)
{
case GF_OMP_TARGET_KIND_REGION:
case GF_OMP_TARGET_KIND_UPDATE:
case GF_OMP_TARGET_KIND_ENTER_DATA:
case GF_OMP_TARGET_KIND_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_SERIAL:
case GF_OMP_TARGET_KIND_OACC_UPDATE:
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_DECLARE:
data_region = false;
break;
case GF_OMP_TARGET_KIND_DATA:
case GF_OMP_TARGET_KIND_OACC_DATA:
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
data_region = true;
break;
default:
gcc_unreachable ();
}
child_fn = NULL_TREE;
child_cfun = NULL;
if (offloaded)
{
child_fn = gimple_omp_target_child_fn (entry_stmt);
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
}
/* Supported by expand_omp_taskreg, but not here. */
if (child_cfun != NULL)
gcc_checking_assert (!child_cfun->cfg);
gcc_checking_assert (!gimple_in_ssa_p (cfun));
entry_bb = region->entry;
exit_bb = region->exit;
switch (target_kind)
{
case GF_OMP_TARGET_KIND_OACC_KERNELS:
mark_loops_in_oacc_kernels_region (region->entry, region->exit);
/* Further down, all OpenACC compute constructs will be mapped to
BUILT_IN_GOACC_PARALLEL, and to distinguish between them, there
is an "oacc kernels" attribute set for OpenACC kernels. */
DECL_ATTRIBUTES (child_fn)
= tree_cons (get_identifier ("oacc kernels"),
NULL_TREE, DECL_ATTRIBUTES (child_fn));
break;
case GF_OMP_TARGET_KIND_OACC_SERIAL:
/* Further down, all OpenACC compute constructs will be mapped to
BUILT_IN_GOACC_PARALLEL, and to distinguish between them, there
is an "oacc serial" attribute set for OpenACC serial. */
DECL_ATTRIBUTES (child_fn)
= tree_cons (get_identifier ("oacc serial"),
NULL_TREE, DECL_ATTRIBUTES (child_fn));
break;
default:
break;
}
if (offloaded)
{
unsigned srcidx, dstidx, num;
/* If the offloading region needs data sent from the parent
function, then the very first statement (except possible
tree profile counter updates) of the offloading body
is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
&.OMP_DATA_O is passed as an argument to the child function,
we need to replace it with the argument as seen by the child
function.
In most cases, this will end up being the identity assignment
.OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had
a function call that has been inlined, the original PARM_DECL
.OMP_DATA_I may have been converted into a different local
variable, in which case we need to keep the assignment.  */
tree data_arg = gimple_omp_target_data_arg (entry_stmt);
if (data_arg)
{
basic_block entry_succ_bb = single_succ (entry_bb);
gimple_stmt_iterator gsi;
tree arg;
gimple *tgtcopy_stmt = NULL;
tree sender = TREE_VEC_ELT (data_arg, 0);
for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
{
gcc_assert (!gsi_end_p (gsi));
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) != GIMPLE_ASSIGN)
continue;
if (gimple_num_ops (stmt) == 2)
{
tree arg = gimple_assign_rhs1 (stmt);
/* We're ignoring the subcode because we're
effectively doing a STRIP_NOPS. */
if (TREE_CODE (arg) == ADDR_EXPR
&& TREE_OPERAND (arg, 0) == sender)
{
tgtcopy_stmt = stmt;
break;
}
}
}
gcc_assert (tgtcopy_stmt != NULL);
arg = DECL_ARGUMENTS (child_fn);
gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
gsi_remove (&gsi, true);
}
/* Declare local variables needed in CHILD_CFUN. */
block = DECL_INITIAL (child_fn);
BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
/* The gimplifier could record temporaries in the offloading block
rather than in the containing function's local_decls chain,
which would mean cgraph missed finalizing them.  Do it now.  */
for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
varpool_node::finalize_decl (t);
DECL_SAVED_TREE (child_fn) = NULL;
/* We'll create a CFG for child_fn, so no gimple body is needed. */
gimple_set_body (child_fn, NULL);
TREE_USED (block) = 1;
/* Reset DECL_CONTEXT on function arguments. */
for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
/* Split ENTRY_BB at GIMPLE_OMP_TARGET,
so that it can be moved to the child function.  */
gsi = gsi_last_nondebug_bb (entry_bb);
stmt = gsi_stmt (gsi);
gcc_assert (stmt
&& gimple_code (stmt) == gimple_code (entry_stmt));
e = split_block (entry_bb, stmt);
gsi_remove (&gsi, true);
entry_bb = e->dest;
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
/* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
if (exit_bb)
{
gsi = gsi_last_nondebug_bb (exit_bb);
gcc_assert (!gsi_end_p (gsi)
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
stmt = gimple_build_return (NULL);
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
gsi_remove (&gsi, true);
}
/* Move the offloading region into CHILD_CFUN. */
block = gimple_block (entry_stmt);
new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
if (exit_bb)
single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
/* When the OMP expansion process cannot guarantee an up-to-date
loop tree, arrange for the child function to fix up its loops.  */
if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
/* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
num = vec_safe_length (child_cfun->local_decls);
for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
{
t = (*child_cfun->local_decls)[srcidx];
if (DECL_CONTEXT (t) == cfun->decl)
continue;
if (srcidx != dstidx)
(*child_cfun->local_decls)[dstidx] = t;
dstidx++;
}
if (dstidx != num)
vec_safe_truncate (child_cfun->local_decls, dstidx);
/* Inform the callgraph about the new function. */
child_cfun->curr_properties = cfun->curr_properties;
child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
cgraph_node *node = cgraph_node::get_create (child_fn);
node->parallelized_function = 1;
cgraph_node::add_new_function (child_fn, true);
/* Add the new function to the offload table. */
if (ENABLE_OFFLOADING)
{
if (in_lto_p)
DECL_PRESERVE_P (child_fn) = 1;
vec_safe_push (offload_funcs, child_fn);
}
bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
&& !DECL_ASSEMBLER_NAME_SET_P (child_fn);
/* Fix the callgraph edges for child_cfun. Those for cfun will be
fixed in a following pass. */
push_cfun (child_cfun);
if (need_asm)
assign_assembler_name_if_needed (child_fn);
cgraph_edge::rebuild_edges ();
/* Some EH regions might become dead, see PR34608. If
pass_cleanup_cfg isn't the first pass to happen with the
new child, these dead EH edges might cause problems.
Clean them up now. */
if (flag_exceptions)
{
basic_block bb;
bool changed = false;
FOR_EACH_BB_FN (bb, cfun)
changed |= gimple_purge_dead_eh_edges (bb);
if (changed)
cleanup_tree_cfg ();
}
if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
verify_loop_structure ();
pop_cfun ();
if (dump_file && !gimple_in_ssa_p (cfun))
{
omp_any_child_fn_dumped = true;
dump_function_header (dump_file, child_fn, dump_flags);
dump_function_to_file (child_fn, dump_file, dump_flags);
}
adjust_context_and_scope (region, gimple_block (entry_stmt), child_fn);
}
/* Emit a library call to launch the offloading region, or do data
transfers. */
tree t1, t2, t3, t4, depend, c, clauses;
enum built_in_function start_ix;
unsigned int flags_i = 0;
switch (gimple_omp_target_kind (entry_stmt))
{
case GF_OMP_TARGET_KIND_REGION:
start_ix = BUILT_IN_GOMP_TARGET;
break;
case GF_OMP_TARGET_KIND_DATA:
start_ix = BUILT_IN_GOMP_TARGET_DATA;
break;
case GF_OMP_TARGET_KIND_UPDATE:
start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
break;
case GF_OMP_TARGET_KIND_ENTER_DATA:
start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
break;
case GF_OMP_TARGET_KIND_EXIT_DATA:
start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
flags_i |= GOMP_TARGET_FLAG_EXIT_DATA;
break;
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_SERIAL:
start_ix = BUILT_IN_GOACC_PARALLEL;
break;
case GF_OMP_TARGET_KIND_OACC_DATA:
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
start_ix = BUILT_IN_GOACC_DATA_START;
break;
case GF_OMP_TARGET_KIND_OACC_UPDATE:
start_ix = BUILT_IN_GOACC_UPDATE;
break;
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA;
break;
case GF_OMP_TARGET_KIND_OACC_DECLARE:
start_ix = BUILT_IN_GOACC_DECLARE;
break;
default:
gcc_unreachable ();
}
clauses = gimple_omp_target_clauses (entry_stmt);
tree device = NULL_TREE;
location_t device_loc = UNKNOWN_LOCATION;
tree goacc_flags = NULL_TREE;
if (is_gimple_omp_oacc (entry_stmt))
{
/* By default, no GOACC_FLAGs are set. */
goacc_flags = integer_zero_node;
}
else
{
c = omp_find_clause (clauses, OMP_CLAUSE_DEVICE);
if (c)
{
device = OMP_CLAUSE_DEVICE_ID (c);
device_loc = OMP_CLAUSE_LOCATION (c);
}
else
{
/* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime
library choose). */
device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV);
device_loc = gimple_location (entry_stmt);
}
c = omp_find_clause (clauses, OMP_CLAUSE_NOWAIT);
if (c)
flags_i |= GOMP_TARGET_FLAG_NOWAIT;
}
/* By default, there is no conditional. */
tree cond = NULL_TREE;
c = omp_find_clause (clauses, OMP_CLAUSE_IF);
if (c)
cond = OMP_CLAUSE_IF_EXPR (c);
/* If we found the clause 'if (cond)', build:
OpenACC: goacc_flags = (cond ? goacc_flags : goacc_flags | GOACC_FLAG_HOST_FALLBACK)
OpenMP: device = (cond ? device : GOMP_DEVICE_HOST_FALLBACK) */
if (cond)
{
tree *tp;
if (is_gimple_omp_oacc (entry_stmt))
tp = &goacc_flags;
else
{
/* Ensure 'device' is of the correct type. */
device = fold_convert_loc (device_loc, integer_type_node, device);
tp = &device;
}
cond = gimple_boolify (cond);
basic_block cond_bb, then_bb, else_bb;
edge e;
tree tmp_var;
tmp_var = create_tmp_var (TREE_TYPE (*tp));
if (offloaded)
e = split_block_after_labels (new_bb);
else
{
gsi = gsi_last_nondebug_bb (new_bb);
gsi_prev (&gsi);
e = split_block (new_bb, gsi_stmt (gsi));
}
cond_bb = e->src;
new_bb = e->dest;
remove_edge (e);
then_bb = create_empty_bb (cond_bb);
else_bb = create_empty_bb (then_bb);
set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
stmt = gimple_build_cond_empty (cond);
gsi = gsi_last_bb (cond_bb);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
gsi = gsi_start_bb (then_bb);
stmt = gimple_build_assign (tmp_var, *tp);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
gsi = gsi_start_bb (else_bb);
if (is_gimple_omp_oacc (entry_stmt))
stmt = gimple_build_assign (tmp_var,
BIT_IOR_EXPR,
*tp,
build_int_cst (integer_type_node,
GOACC_FLAG_HOST_FALLBACK));
else
stmt = gimple_build_assign (tmp_var,
build_int_cst (integer_type_node,
GOMP_DEVICE_HOST_FALLBACK));
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
add_bb_to_loop (then_bb, cond_bb->loop_father);
add_bb_to_loop (else_bb, cond_bb->loop_father);
make_edge (then_bb, new_bb, EDGE_FALLTHRU);
make_edge (else_bb, new_bb, EDGE_FALLTHRU);
*tp = tmp_var;
gsi = gsi_last_nondebug_bb (new_bb);
}
else
{
gsi = gsi_last_nondebug_bb (new_bb);
if (device != NULL_TREE)
device = force_gimple_operand_gsi (&gsi, device, true, NULL_TREE,
true, GSI_SAME_STMT);
}
t = gimple_omp_target_data_arg (entry_stmt);
if (t == NULL)
{
t1 = size_zero_node;
t2 = build_zero_cst (ptr_type_node);
t3 = t2;
t4 = t2;
}
else
{
t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
t1 = size_binop (PLUS_EXPR, t1, size_int (1));
t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
}
gimple *g;
bool tagging = false;
/* The maximum number of arguments used by any start_ix, without varargs.  */
auto_vec<tree, 11> args;
if (is_gimple_omp_oacc (entry_stmt))
{
tree goacc_flags_m = fold_build1 (GOACC_FLAGS_MARSHAL_OP,
TREE_TYPE (goacc_flags), goacc_flags);
goacc_flags_m = force_gimple_operand_gsi (&gsi, goacc_flags_m, true,
NULL_TREE, true,
GSI_SAME_STMT);
args.quick_push (goacc_flags_m);
}
else
args.quick_push (device);
if (offloaded)
args.quick_push (build_fold_addr_expr (child_fn));
args.quick_push (t1);
args.quick_push (t2);
args.quick_push (t3);
args.quick_push (t4);
switch (start_ix)
{
case BUILT_IN_GOACC_DATA_START:
case BUILT_IN_GOACC_DECLARE:
case BUILT_IN_GOMP_TARGET_DATA:
break;
case BUILT_IN_GOMP_TARGET:
case BUILT_IN_GOMP_TARGET_UPDATE:
case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA:
args.quick_push (build_int_cst (unsigned_type_node, flags_i));
c = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
if (c)
depend = OMP_CLAUSE_DECL (c);
else
depend = build_int_cst (ptr_type_node, 0);
args.quick_push (depend);
if (start_ix == BUILT_IN_GOMP_TARGET)
args.quick_push (get_target_arguments (&gsi, entry_stmt));
break;
case BUILT_IN_GOACC_PARALLEL:
if (lookup_attribute ("oacc serial", DECL_ATTRIBUTES (child_fn)) != NULL)
{
tree dims = NULL_TREE;
unsigned int ix;
/* For serial constructs we set all dimensions to 1. */
for (ix = GOMP_DIM_MAX; ix--;)
dims = tree_cons (NULL_TREE, integer_one_node, dims);
oacc_replace_fn_attrib (child_fn, dims);
}
else
oacc_set_fn_attrib (child_fn, clauses, &args);
tagging = true;
/* FALLTHRU */
case BUILT_IN_GOACC_ENTER_EXIT_DATA:
case BUILT_IN_GOACC_UPDATE:
{
tree t_async = NULL_TREE;
/* If present, use the value specified by the respective
clause, making sure it is of the correct type.  */
c = omp_find_clause (clauses, OMP_CLAUSE_ASYNC);
if (c)
t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
integer_type_node,
OMP_CLAUSE_ASYNC_EXPR (c));
else if (!tagging)
/* Default values for t_async. */
t_async = fold_convert_loc (gimple_location (entry_stmt),
integer_type_node,
build_int_cst (integer_type_node,
GOMP_ASYNC_SYNC));
if (tagging && t_async)
{
unsigned HOST_WIDE_INT i_async = GOMP_LAUNCH_OP_MAX;
if (TREE_CODE (t_async) == INTEGER_CST)
{
/* See if we can pack the async arg into the tag's
operand.  */
i_async = TREE_INT_CST_LOW (t_async);
if (i_async < GOMP_LAUNCH_OP_MAX)
t_async = NULL_TREE;
else
i_async = GOMP_LAUNCH_OP_MAX;
}
args.safe_push (oacc_launch_pack (GOMP_LAUNCH_ASYNC, NULL_TREE,
i_async));
}
if (t_async)
args.safe_push (force_gimple_operand_gsi (&gsi, t_async, true,
NULL_TREE, true,
GSI_SAME_STMT));
/* Save the argument index, and ... */
unsigned t_wait_idx = args.length ();
unsigned num_waits = 0;
c = omp_find_clause (clauses, OMP_CLAUSE_WAIT);
if (!tagging || c)
/* ... push a placeholder. */
args.safe_push (integer_zero_node);
for (; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT)
{
tree arg = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
integer_type_node,
OMP_CLAUSE_WAIT_EXPR (c));
arg = force_gimple_operand_gsi (&gsi, arg, true, NULL_TREE, true,
GSI_SAME_STMT);
args.safe_push (arg);
num_waits++;
}
if (!tagging || num_waits)
{
tree len;
/* Now that we know the number, update the placeholder. */
if (tagging)
len = oacc_launch_pack (GOMP_LAUNCH_WAIT, NULL_TREE, num_waits);
else
len = build_int_cst (integer_type_node, num_waits);
len = fold_convert_loc (gimple_location (entry_stmt),
unsigned_type_node, len);
args[t_wait_idx] = len;
}
}
break;
default:
gcc_unreachable ();
}
if (tagging)
/* Push terminal marker - zero. */
args.safe_push (oacc_launch_pack (0, NULL_TREE, 0));
g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
gimple_set_location (g, gimple_location (entry_stmt));
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
if (!offloaded)
{
g = gsi_stmt (gsi);
gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
gsi_remove (&gsi, true);
}
if (data_region && region->exit)
{
gsi = gsi_last_nondebug_bb (region->exit);
g = gsi_stmt (gsi);
gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
gsi_remove (&gsi, true);
}
}
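/* Illustrative shape of the emitted launch call for a plain OpenMP
   'target' region; the entry-point name is an assumption about current
   libgomp rather than something taken from this file:

       GOMP_target_ext (device, child_fn, mapnum, hostaddrs, sizes,
                        kinds, flags, depend, args);

   where t1..t4 above supply mapnum/hostaddrs/sizes/kinds and ARGS is
   the NULL-terminated array built by get_target_arguments.  */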
/* Expand the parallel region tree rooted at REGION. Expansion
proceeds in depth-first order. Innermost regions are expanded
first. This way, parallel regions that require a new function to
be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
internal dependencies in their body. */
static void
expand_omp (struct omp_region *region)
{
omp_any_child_fn_dumped = false;
while (region)
{
location_t saved_location;
gimple *inner_stmt = NULL;
/* First, determine whether this is a combined parallel+workshare
region. */
if (region->type == GIMPLE_OMP_PARALLEL)
determine_parallel_type (region);
if (region->type == GIMPLE_OMP_FOR
&& gimple_omp_for_combined_p (last_stmt (region->entry)))
inner_stmt = last_stmt (region->inner->entry);
if (region->inner)
expand_omp (region->inner);
saved_location = input_location;
if (gimple_has_location (last_stmt (region->entry)))
input_location = gimple_location (last_stmt (region->entry));
switch (region->type)
{
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
expand_omp_taskreg (region);
break;
case GIMPLE_OMP_FOR:
expand_omp_for (region, inner_stmt);
break;
case GIMPLE_OMP_SECTIONS:
expand_omp_sections (region);
break;
case GIMPLE_OMP_SECTION:
/* Individual omp sections are handled together with their
parent GIMPLE_OMP_SECTIONS region. */
break;
case GIMPLE_OMP_SINGLE:
expand_omp_single (region);
break;
case GIMPLE_OMP_ORDERED:
{
gomp_ordered *ord_stmt
= as_a <gomp_ordered *> (last_stmt (region->entry));
if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_DEPEND))
{
/* We'll expand these when expanding corresponding
worksharing region with ordered(n) clause. */
gcc_assert (region->outer
&& region->outer->type == GIMPLE_OMP_FOR);
region->ord_stmt = ord_stmt;
break;
}
}
/* FALLTHRU */
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TEAMS:
expand_omp_synch (region);
break;
case GIMPLE_OMP_ATOMIC_LOAD:
expand_omp_atomic (region);
break;
case GIMPLE_OMP_TARGET:
expand_omp_target (region);
break;
default:
gcc_unreachable ();
}
input_location = saved_location;
region = region->next;
}
if (omp_any_child_fn_dumped)
{
if (dump_file)
dump_function_header (dump_file, current_function_decl, dump_flags);
omp_any_child_fn_dumped = false;
}
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
true, the function ends once a single tree is built (otherwise, a whole
forest of OMP constructs may be built).  */
static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
bool single_tree)
{
gimple_stmt_iterator gsi;
gimple *stmt;
basic_block son;
gsi = gsi_last_nondebug_bb (bb);
if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
{
struct omp_region *region;
enum gimple_code code;
stmt = gsi_stmt (gsi);
code = gimple_code (stmt);
if (code == GIMPLE_OMP_RETURN)
{
/* STMT is the return point out of region PARENT. Mark it
as the exit point and make PARENT the immediately
enclosing region. */
gcc_assert (parent);
region = parent;
region->exit = bb;
parent = parent->outer;
}
else if (code == GIMPLE_OMP_ATOMIC_STORE)
{
/* GIMPLE_OMP_ATOMIC_STORE is analogous to
GIMPLE_OMP_RETURN, but matches with
GIMPLE_OMP_ATOMIC_LOAD. */
gcc_assert (parent);
gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
region = parent;
region->exit = bb;
parent = parent->outer;
}
else if (code == GIMPLE_OMP_CONTINUE)
{
gcc_assert (parent);
parent->cont = bb;
}
else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
{
/* GIMPLE_OMP_SECTIONS_SWITCH is part of
GIMPLE_OMP_SECTIONS, and we do nothing for it. */
}
else
{
region = new_omp_region (bb, code, parent);
/* Otherwise... */
if (code == GIMPLE_OMP_TARGET)
{
switch (gimple_omp_target_kind (stmt))
{
case GF_OMP_TARGET_KIND_REGION:
case GF_OMP_TARGET_KIND_DATA:
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_SERIAL:
case GF_OMP_TARGET_KIND_OACC_DATA:
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
break;
case GF_OMP_TARGET_KIND_UPDATE:
case GF_OMP_TARGET_KIND_ENTER_DATA:
case GF_OMP_TARGET_KIND_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_UPDATE:
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_DECLARE:
/* ..., other than for those stand-alone directives... */
region = NULL;
break;
default:
gcc_unreachable ();
}
}
else if (code == GIMPLE_OMP_ORDERED
&& omp_find_clause (gimple_omp_ordered_clauses
(as_a <gomp_ordered *> (stmt)),
OMP_CLAUSE_DEPEND))
/* #pragma omp ordered depend is also just a stand-alone
directive. */
region = NULL;
else if (code == GIMPLE_OMP_TASK
&& gimple_omp_task_taskwait_p (stmt))
/* #pragma omp taskwait depend(...) is a stand-alone directive. */
region = NULL;
/* ..., this directive becomes the parent for a new region. */
if (region)
parent = region;
}
}
if (single_tree && !parent)
return;
for (son = first_dom_son (CDI_DOMINATORS, bb);
son;
son = next_dom_son (CDI_DOMINATORS, son))
build_omp_regions_1 (son, parent, single_tree);
}
/* Builds the tree of OMP regions rooted at ROOT, storing it to
root_omp_region. */
static void
build_omp_regions_root (basic_block root)
{
gcc_assert (root_omp_region == NULL);
build_omp_regions_1 (root, NULL, true);
gcc_assert (root_omp_region != NULL);
}
/* Expands omp construct (and its subconstructs) starting in HEAD. */
void
omp_expand_local (basic_block head)
{
build_omp_regions_root (head);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "\nOMP region tree\n\n");
dump_omp_region (dump_file, root_omp_region, 0);
fprintf (dump_file, "\n");
}
remove_exit_barriers (root_omp_region);
expand_omp (root_omp_region);
omp_free_regions ();
}
/* Scan the CFG and build a tree of OMP regions, storing the root in
root_omp_region.  */
static void
build_omp_regions (void)
{
gcc_assert (root_omp_region == NULL);
calculate_dominance_info (CDI_DOMINATORS);
build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
}
/* Main entry point for expanding OMP-GIMPLE into runtime calls. */
static unsigned int
execute_expand_omp (void)
{
build_omp_regions ();
if (!root_omp_region)
return 0;
if (dump_file)
{
fprintf (dump_file, "\nOMP region tree\n\n");
dump_omp_region (dump_file, root_omp_region, 0);
fprintf (dump_file, "\n");
}
remove_exit_barriers (root_omp_region);
expand_omp (root_omp_region);
if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
verify_loop_structure ();
cleanup_tree_cfg ();
omp_free_regions ();
return 0;
}
/* OMP expansion -- the default pass, run before creation of SSA form. */
namespace {
const pass_data pass_data_expand_omp =
{
GIMPLE_PASS, /* type */
"ompexp", /* name */
OPTGROUP_OMP, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_gimple_any, /* properties_required */
PROP_gimple_eomp, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_expand_omp : public gimple_opt_pass
{
public:
pass_expand_omp (gcc::context *ctxt)
: gimple_opt_pass (pass_data_expand_omp, ctxt)
{}
/* opt_pass methods: */
virtual unsigned int execute (function *)
{
bool gate = ((flag_openacc != 0 || flag_openmp != 0
|| flag_openmp_simd != 0)
&& !seen_error ());
/* This pass always runs, to provide PROP_gimple_eomp.
But often, there is nothing to do. */
if (!gate)
return 0;
return execute_expand_omp ();
}
}; // class pass_expand_omp
} // anon namespace
gimple_opt_pass *
make_pass_expand_omp (gcc::context *ctxt)
{
return new pass_expand_omp (ctxt);
}
namespace {
const pass_data pass_data_expand_omp_ssa =
{
GIMPLE_PASS, /* type */
"ompexpssa", /* name */
OPTGROUP_OMP, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_cfg | PROP_ssa, /* properties_required */
PROP_gimple_eomp, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
};
class pass_expand_omp_ssa : public gimple_opt_pass
{
public:
pass_expand_omp_ssa (gcc::context *ctxt)
: gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *fun)
{
return !(fun->curr_properties & PROP_gimple_eomp);
}
virtual unsigned int execute (function *) { return execute_expand_omp (); }
opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); }
}; // class pass_expand_omp_ssa
} // anon namespace
gimple_opt_pass *
make_pass_expand_omp_ssa (gcc::context *ctxt)
{
return new pass_expand_omp_ssa (ctxt);
}
/* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
GIMPLE_* codes. */
bool
omp_make_gimple_edges (basic_block bb, struct omp_region **region,
int *region_idx)
{
gimple *last = last_stmt (bb);
enum gimple_code code = gimple_code (last);
struct omp_region *cur_region = *region;
bool fallthru = false;
switch (code)
{
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_SECTION:
cur_region = new_omp_region (bb, code, cur_region);
fallthru = true;
break;
case GIMPLE_OMP_TASK:
cur_region = new_omp_region (bb, code, cur_region);
fallthru = true;
if (gimple_omp_task_taskwait_p (last))
cur_region = cur_region->outer;
break;
case GIMPLE_OMP_ORDERED:
cur_region = new_omp_region (bb, code, cur_region);
fallthru = true;
if (omp_find_clause (gimple_omp_ordered_clauses
(as_a <gomp_ordered *> (last)),
OMP_CLAUSE_DEPEND))
cur_region = cur_region->outer;
break;
case GIMPLE_OMP_TARGET:
cur_region = new_omp_region (bb, code, cur_region);
fallthru = true;
switch (gimple_omp_target_kind (last))
{
case GF_OMP_TARGET_KIND_REGION:
case GF_OMP_TARGET_KIND_DATA:
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_SERIAL:
case GF_OMP_TARGET_KIND_OACC_DATA:
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
break;
case GF_OMP_TARGET_KIND_UPDATE:
case GF_OMP_TARGET_KIND_ENTER_DATA:
case GF_OMP_TARGET_KIND_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_UPDATE:
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_DECLARE:
cur_region = cur_region->outer;
break;
default:
gcc_unreachable ();
}
break;
case GIMPLE_OMP_SECTIONS:
cur_region = new_omp_region (bb, code, cur_region);
fallthru = true;
break;
case GIMPLE_OMP_SECTIONS_SWITCH:
fallthru = false;
break;
case GIMPLE_OMP_ATOMIC_LOAD:
case GIMPLE_OMP_ATOMIC_STORE:
fallthru = true;
break;
case GIMPLE_OMP_RETURN:
/* In the case of a GIMPLE_OMP_SECTION, the edge will go
somewhere other than the next block. This will be
created later. */
cur_region->exit = bb;
if (cur_region->type == GIMPLE_OMP_TASK)
/* Add an edge corresponding to not scheduling the task
immediately. */
make_edge (cur_region->entry, bb, EDGE_ABNORMAL);
fallthru = cur_region->type != GIMPLE_OMP_SECTION;
cur_region = cur_region->outer;
break;
case GIMPLE_OMP_CONTINUE:
cur_region->cont = bb;
switch (cur_region->type)
{
case GIMPLE_OMP_FOR:
/* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
successor edges as abnormal to prevent splitting
them.  */
single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
/* Make the loopback edge. */
make_edge (bb, single_succ (cur_region->entry),
EDGE_ABNORMAL);
/* Create an edge from GIMPLE_OMP_FOR to exit, which
corresponds to the case that the body of the loop
is not executed at all. */
make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
fallthru = false;
break;
case GIMPLE_OMP_SECTIONS:
/* Wire up the edges into and out of the nested sections. */
{
basic_block switch_bb = single_succ (cur_region->entry);
struct omp_region *i;
for (i = cur_region->inner; i ; i = i->next)
{
gcc_assert (i->type == GIMPLE_OMP_SECTION);
make_edge (switch_bb, i->entry, 0);
make_edge (i->exit, bb, EDGE_FALLTHRU);
}
/* Make the loopback edge to the block with
GIMPLE_OMP_SECTIONS_SWITCH. */
make_edge (bb, switch_bb, 0);
/* Make the edge from the switch to exit. */
make_edge (switch_bb, bb->next_bb, 0);
fallthru = false;
}
break;
case GIMPLE_OMP_TASK:
fallthru = true;
break;
default:
gcc_unreachable ();
}
break;
default:
gcc_unreachable ();
}
if (*region != cur_region)
{
*region = cur_region;
if (cur_region)
*region_idx = cur_region->entry->index;
else
*region_idx = 0;
}
return fallthru;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology, albeit one that is very common for image blurring and
% sharpening effects, covering not only 2D Gaussian blurring, but also
% 2-pass 1D blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user-supplied
% arguments.  Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
static inline size_t fact(size_t n)
{
size_t f,l;
for(f=1, l=2; l <= n; f=f*l, l++);
return(f);
}
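/* Note added for clarity (not in the original source): fact() overflows
   silently once n! exceeds SIZE_MAX, i.e. for n >= 21 with a 64-bit
   size_t (20! is about 2.4e18) and for n >= 13 with a 32-bit size_t;
   this is ample for the small binomial kernels generated here.  */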
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
while (kernel->next != (KernelInfo *) NULL)
kernel=kernel->next;
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin; this is no longer the case, and any rectangular kernel
% may be used, with any of its values declared the origin.  This in turn
% allows the use of highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array.  This allows you to shape the kernel within its
% rectangular area.  That is, 'nan' values provide a 'mask' for the kernel
% shape.  However, at least one non-nan value must be provided for a kernel
% to work correctly.
%
% The returned kernel should be freed using DestroyKernelInfo() when you
% are finished with it; do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% A kernel of size W by H, with W*H floating point numbers following.
% The 'center' can optionally be defined at +X+Y (such that +0+0
% is the top-left corner).  If not defined, the pixel in the center (for
% odd sizes), or the pixel immediately to the top or left of center (for
% even sizes), is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels', which can be used by some morphology
% operators.  A list is defined as a semicolon-separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at the start, at the end, or between kernel
% definitions, are simply ignored.
%
% The special flags will expand a single kernel into a list of rotated
% kernels.  A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotations, but gives a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character, while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed that an old-style simple list of
% numbers, generating an odd-sized square kernel, has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MagickPathExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old style specification, forming an odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
GetNextToken(p,&p,MagickPathExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling an incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we received at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string,
ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
GetNextToken(kernel_string,&p,MagickPathExtent,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *) NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel definition */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
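/* Illustrative walk-through, not part of the original source: for the
** input "Octagon:3", ParseCommandOption() maps "Octagon" to OctagonKernel,
** ParseGeometry() parses "3" into args.rho = 3.0, and AcquireKernelBuiltIn()
** then builds the 7x7 octagonal kernel; a trailing '>' flag, as in
** "Sobel:0>", would instead expand the result into a list of 90-degree
** rotations via ExpandRotateKernelInfo().
*/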
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
ExceptionInfo *exception)
{
KernelInfo
*kernel,
*new_kernel;
char
*kernel_cache,
token[MagickPathExtent];
const char
*p;
if (kernel_string == (const char *) NULL)
return(ParseKernelArray(kernel_string));
p=kernel_string;
kernel_cache=(char *) NULL;
if (*kernel_string == '@')
{
kernel_cache=FileToString(kernel_string+1,~0UL,exception);
if (kernel_cache == (char *) NULL)
return((KernelInfo *) NULL);
p=(const char *) kernel_cache;
}
kernel=NULL;
while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
{
/* ignore extra or multiple ';' kernel separators */
if (*token != ';')
{
/* tokens starting with an alphabetic character are Named kernels */
if (isalpha((int) ((unsigned char) *token)) != 0)
new_kernel=ParseKernelName(p,exception);
else /* otherwise a user defined kernel array */
new_kernel=ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if (new_kernel == (KernelInfo *) NULL)
{
if (kernel != (KernelInfo *) NULL)
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if (kernel == (KernelInfo *) NULL)
kernel=new_kernel;
else
LastKernelInfo(kernel)->next=new_kernel;
}
/* look for the next kernel in list */
p=strchr(p,';');
if (p == (char *) NULL)
break;
p++;
}
if (kernel_cache != (char *) NULL)
kernel_cache=DestroyString(kernel_cache);
return(kernel);
}
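/* Illustrative usage sketch (assumes the standard MagickCore exception
** setup/teardown calls; error handling kept minimal):
**
**    ExceptionInfo *exception = AcquireExceptionInfo();
**    KernelInfo *kernel = AcquireKernelInfo("3x3: 0,1,0 1,-4,1 0,1,0",
**      exception);
**    if (kernel != (KernelInfo *) NULL)
**      kernel = DestroyKernelInfo(kernel);
**    exception = DestroyExceptionInfo(exception);
*/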
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% It takes a KernelInfoType, and a set of geometry-style arguments, which
% were typically decoded from a user-supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo *args, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% A No-Op or Scaling single-element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required.  The resulting kernel is
% normalized.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: the 'radius' is optional, but if provided it can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result.  If not given, or set to 0, the
% radius will be determined so as to produce the best minimal-error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1-dimensional or linear gaussian blur, at the angle given
% (currently restricted to orthogonal angles).  If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1.  The kernel can be rotated
% by a 90-degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other is equivalent to a far larger "Gaussian" kernel with the
% same sigma value; however, it is much faster to apply.  This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet-like trail.  The kernel is actually half a gaussian curve;
% adding two such blurs in opposite directions produces a Blur kernel.
% The angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2-dimensional Pascal's Triangle
% of values.  Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default.  As such, for
% non-HDRI versions of ImageMagick some form of normalization, user scaling,
% and biasing of the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled variants of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% The Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic.  That is, it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: the default un-normalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2), -1 |
%
% However, this kernel is also at the heart of the FreiChen Edge Detection
% Process, which uses a set of 9 specially weighted kernels.  These 9
% kernels are not normalized, but directly applied to the image.  The
% results are then added together, to produce the intensity of an edge in
% a specific direction.  The square root of the pixel value can then be
% taken as the cosine of the edge, and with at least 2 such runs at 90
% degrees from each other, both the direction and the strength of the edge
% can be determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | 1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection,
% and the last is to add an average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
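%
% As a rough sketch of applying the full technique through the C API (the
% variable names here are illustrative only; the compose operator mirrors
% the "Plus" option quoted above):
%
%    KernelInfo
%      *kernel;
%
%    kernel = AcquireKernelInfo("FreiChen:-1", exception);
%    if (kernel != (KernelInfo *) NULL)
%      {
%        /* 'Plus' composition sums the results of all 9 kernels */
%        edge_image = MorphologyApply(image, ConvolveMorphology, 1, kernel,
%          PlusCompositeOp, 0.0, exception);
%        kernel = DestroyKernelInfo(kernel);
%      }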
%
% WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly smaller than the equivalent square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate an octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in a "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given; the radius
% may be a floating-point value. Final kernel size is floor(radius)*2+1
% square. A radius of approximately 4.3 (a 9x9 kernel) is the default.
%
% NOTE: Low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point'; otherwise the closest
% pixel to the center of the rectangle is selected. For example,
% "Rectangle:5x3+2+1" generates a 5x3 kernel of 1's with its control
% point at pixel (2,1) of the kernel.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernels are not good general morphological kernels; they are
% used more for highlighting and marking any single pixels in an image
% using a "Dilate" method, as appropriate.
%
% For the same reason, iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately radius 3 in a 7x7 kernel.
% These are the 'edge' pixels of the default "Disk" kernel;
% more specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels that fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of line ends (default is both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions:type
% Find junctions of multiple lines (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Find single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% Many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
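%
% A minimal sketch of skeleton extraction with these kernels (variable
% names are illustrative; a -1 iteration count loops until no pixels
% change, as described for MorphologyApply() below):
%
%    KernelInfo
%      *kernel;
%
%    kernel = AcquireKernelInfo("Skeleton:3", exception);
%    if (kernel != (KernelInfo *) NULL)
%      {
%        skeleton = MorphologyApply(image, ThinningMorphology, -1, kernel,
%          UndefinedCompositeOp, 0.0, exception);
%        kernel = DestroyKernelInfo(kernel);
%      }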
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with a
% 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling an anti-aliased shape.
%
% See the 'Distance' Morphological Method for information on how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric) is the distance needed when you can only travel
% in horizontal or vertical directions. It is the distance a 'Rook' in
% chess would have to travel, and results in a diamond-like distance,
% where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances match those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get an octagonal-like distance function.
%
% However, using a larger radius such as "Euclidean:4", you will get a
% much smoother distance gradient from the edge of the shape, especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
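%
% A minimal sketch of generating a distance gradient (variable names are
% illustrative, and 'DistanceMorphology' is assumed to be the method
% implementing the 'Distance' technique described above):
%
%    KernelInfo
%      *kernel;
%
%    kernel = AcquireKernelInfo("Euclidean:4x100", exception);
%    if (kernel != (KernelInfo *) NULL)
%      {
%        distance = MorphologyApply(image, DistanceMorphology, 1, kernel,
%          UndefinedCompositeOp, 0.0, exception);
%        kernel = DestroyKernelInfo(kernel);
%      }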
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Discrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this I don't know, but it appears to be based on the
* Error Function 'erf()' (the integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexican Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigmas (< 0.1) the central value becomes larger than one, thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivalent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigmas (< 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is also less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* fill in the binomial coefficients (outer product of Pascal's triangle rows) */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we cannot yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
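% A minimal usage sketch (the variable names are illustrative):
%
%    KernelInfo
%      *clone;
%
%    clone = CloneKernelInfo(kernel);
%    if (clone != (KernelInfo *) NULL)
%      {
%        /* ... modify and apply the clone ... */
%        clone = DestroyKernelInfo(clone);
%      }
%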
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
register ssize_t
i;
KernelInfo
*new_kernel;
assert(kernel != (KernelInfo *) NULL);
new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (new_kernel == (KernelInfo *) NULL)
return(new_kernel);
*new_kernel=(*kernel); /* copy values in structure */
/* replace the values with a copy of the values */
new_kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
if (new_kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(new_kernel));
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
new_kernel->values[i]=kernel->values[i];
/* Also clone the next kernel in the kernel list */
if ( kernel->next != (KernelInfo *) NULL ) {
new_kernel->next = CloneKernelInfo(kernel->next);
if ( new_kernel->next == (KernelInfo *) NULL )
return(DestroyKernelInfo(new_kernel));
}
return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
assert(kernel != (KernelInfo *) NULL);
if (kernel->next != (KernelInfo *) NULL)
kernel->next=DestroyKernelInfo(kernel->next);
kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register MagickRealType
*k,t;
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=(ssize_t) kernel->width-1; x < (ssize_t) (kernel->width/2); x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
KernelInfo
*clone,
*last;
last = kernel;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 180); /* flip */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 90); /* transpose */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 180); /* flop */
LastKernelInfo(last)->next = clone;
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels,
% while 90 degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
const KernelInfo *kernel2)
{
register size_t
i;
/* check size and origin location */
if ( kernel1->width != kernel2->width
|| kernel1->height != kernel2->height
|| kernel1->x != kernel2->x
|| kernel1->y != kernel2->y )
return MagickFalse;
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
/* Test for Nan equivalence */
if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
return MagickFalse;
if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
return MagickFalse;
/* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
KernelInfo
*clone_info,
*last;
clone_info=(KernelInfo *) NULL;
last=kernel;
DisableMSCWarning(4127)
while (1) {
RestoreMSCWarning
clone_info=CloneKernelInfo(last);
if (clone_info == (KernelInfo *) NULL)
break;
RotateKernelInfo(clone_info,angle);
if (SameKernelInfo(kernel,clone_info) != MagickFalse)
break;
LastKernelInfo(last)->next=clone_info;
last=clone_info;
}
if (clone_info != (KernelInfo *) NULL)
clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
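%
% For example, the kernel generation code earlier in this module pairs it
% with normalization as follows:
%
%    CalcKernelMetaData(kernel);    /* recalculate the meta-data */
%    ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);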
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
register size_t
i;
kernel->minimum = kernel->maximum = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; i < (kernel->width*kernel->height); i++)
{
if ( fabs(kernel->values[i]) < MagickEpsilon )
kernel->values[i] = 0.0;
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but without
% any user controls. This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage()'s task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalized,
% and blended appropriately before being passed to this routine, with the
% appropriate bias and compose method (typically 'UndefinedCompositeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
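% For example, a single pass of a pre-prepared convolution kernel (a
% sketch; the variable names are illustrative and the zero bias is just
% a placeholder):
%
%    Image
%      *result;
%
%    result = MorphologyApply(image, ConvolveMorphology, 1, kernel,
%      UndefinedCompositeOp, 0.0, exception);
%    if (result == (Image *) NULL)
%      { /* the exception structure holds the error */ }
%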
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
const MorphologyMethod method,const KernelInfo *kernel,const double bias,
ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*image_view,
*morphology_view;
OffsetInfo
offset;
register ssize_t
j,
y;
size_t
*changes,
changed,
width;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(morphology_image != (Image *) NULL);
assert(morphology_image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
width=image->columns+kernel->width-1;
offset.x=0;
offset.y=0;
switch (method)
{
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
{
/*
Kernel needs to be used with reflection about the origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
default:
{
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
}
changed=0;
changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
sizeof(*changes));
if (changes == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changes[j]=0;
if ((method == ConvolveMorphology) && (kernel->width == 1))
{
register ssize_t
x;
/*
Special handling (for speed) of vertical (blur) kernels. This performs
its handling in columns rather than in rows. This is only done
for convolve, as it is the only method that generates very large 1-D
vertical kernels (such as a 'BlurKernel').
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,morphology_image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
r;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
kernel->height-1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
morphology_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*offset.y;
for (r=0; r < (ssize_t) image->rows; r++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
v;
size_t
count;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if ((traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
k=(&kernel->values[kernel->height-1]);
pixels=p;
pixel=bias;
gamma=0.0;
count=0;
if ((morphology_traits & BlendPixelTrait) == 0)
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
else
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_image->type=image->type;
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : -1);
}
/*
Normal handling of horizontal or rectangular kernels (row by row).
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,morphology_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
kernel->height,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
GetPixelChannels(image)*offset.x);
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
intensity,
maximum,
minimum,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
size_t
count;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if ((traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
pixels=p;
maximum=0.0;
minimum=(double) QuantumRange;
switch (method)
{
case ConvolveMorphology:
{
pixel=bias;
break;
}
case DilateMorphology:
case ErodeIntensityMorphology:
{
pixel=0.0;
break;
}
case HitAndMissMorphology:
case ErodeMorphology:
{
pixel=QuantumRange;
break;
}
default:
{
pixel=(double) p[center+i];
break;
}
}
count=0;
gamma=1.0;
switch (method)
{
case ConvolveMorphology:
{
/*
Weighted Average of pixels using reflected kernel
For correct operation on asymmetrical kernels, the kernel needs to
be applied in its reflected form. That is, its values need to be
reversed.
Correlation is actually the same as this but without reflecting the
kernel, and is thus 'lower-level' than Convolution. However, as
Convolution is the more commonly used method, and a reflected
kernel costs little extra processing, it is Convolution that is
implemented.
Correlation will have its kernel reflected before calling this
function to do a Convolve.
For more details of Correlation vs Convolution see
http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
if ((morphology_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
/*
Alpha blending.
*/
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case ErodeMorphology:
{
/*
Minimum value within kernel neighbourhood.
The kernel is not reflected for this operation. In normal
Greyscale Morphology, the kernel value should be added to the
real value; this is currently not done, due to the nature of
the boolean kernels being used.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateMorphology:
{
/*
Maximum value within kernel neighbourhood.
For correct operation on asymmetrical kernels, the kernel needs
to be applied in its reflected form. That is, its values need to
be reversed.
In normal Greyscale Morphology, the kernel value should be added
to the real value; this is currently not done, due to the nature
of the boolean kernels being used.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k > 0.5))
{
if ((double) pixels[i] > pixel)
pixel=(double) pixels[i];
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
/*
Minimum of foreground pixels minus maximum of background pixels.
The kernel is not reflected for this operation, and consists
of both foreground and background pixel neighbourhoods: 0.0 for
background, and 1.0 for foreground, with either NaN or 0.5 values
for don't care.
The result is clamped so it never produces a meaningless negative
value; such values would cause Thinning/Thicken to not work
correctly when used against a greyscale image.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if (*k > 0.7)
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
else
if (*k < 0.3)
{
if ((double) pixels[i] > maximum)
maximum=(double) pixels[i];
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
pixel-=maximum;
if (pixel < 0.0)
pixel=0.0;
if (method == ThinningMorphology)
pixel=(double) p[center+i]-pixel;
else
if (method == ThickenMorphology)
pixel=(double) p[center+i]+pixel;
break;
}
case ErodeIntensityMorphology:
{
/*
Select pixel with minimum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity < minimum)
{
pixel=(double) pixels[i];
minimum=intensity;
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateIntensityMorphology:
{
/*
Select pixel with maximum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity > maximum)
{
pixel=(double) pixels[i];
maximum=intensity;
}
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case IterativeDistanceMorphology:
{
/*
Compute the iterative distance from the black edge of a white image
shape. Essentially, white values are decreased to the smallest
'distance from edge' that can be found.
It works by adding kernel values to the neighbourhood and selecting
the minimum value found. The kernel is rotated before use, so kernel
distances match resulting distances when a user provided asymmetric
kernel is applied.
This code is nearly identical to true GrayScale Morphology, but not
quite:
GrayDilate: kernel values added, maximum value found; kernel is
rotated before use.
GrayErode: kernel values subtracted, minimum value found; no kernel
rotation used.
Note that the Iterative Distance method is essentially a GrayErode,
but with negative kernel values and kernel rotation applied.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case UndefinedMorphology:
default:
break;
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height*kernel->width/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : -1);
}
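/*
Stand-alone sketch (not library code) of the NaN-aware weighted average used
by the Convolve cases above, reduced to one dimension: NaN weights are
skipped, the weights actually used are summed into 'gamma', and the result
is renormalized by length/count so clipped or sparse kernels keep a
comparable brightness. Names and the epsilon guard are illustrative only.
*/
#if 0
#include <math.h>

static double ConvolveSketch1D(const double *pixels,const double *weights,
  size_t length,double bias)
{
  double
    gamma = 0.0,
    pixel = bias;

  size_t
    count = 0,
    i;

  /* accumulate the weighted sum, skipping NaN ("don't care") weights */
  for (i=0; i < length; i++)
    if (!isnan(weights[i]))
      {
        pixel+=weights[i]*pixels[i];
        gamma+=weights[i];
        count++;
      }
  if ((count == 0) || (fabs(gamma) < 1.0e-12))
    return(pixel);
  /* renormalize for the weights actually used */
  return(pixel*((double) length/count)/gamma);
}
#endif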
/*
This is almost identical to the MorphologyPrimitive() function above, but
applies the primitive directly to the actual image using two passes, once in
each direction, with the results of the previous (and current) row being
re-used.
That is, after each row is 'Sync'ed back into the image, the next row makes
use of those values as part of the calculation of the next row. The process
then repeats, going in the opposite (bottom-up) direction.
Because of this 're-use of results' this function cannot make use of
multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method,const KernelInfo *kernel,
ExceptionInfo *exception)
{
CacheView
*morphology_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
offset;
size_t
width,
changed;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
changed=0;
progress=0;
switch(method)
{
case DistanceMorphology:
case VoronoiMorphology:
{
/*
Kernel reflected about origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
default:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
}
/*
Two views into the same image; do not thread.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Read virtual pixels, and authentic pixels, from the same image! We read
using virtual to get virtual pixel handling, but write back into the same
image.
Only the top half of the kernel is processed, as we do a single pass
downward through the image, iterating the distance function as we go.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
offset.y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v <= offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
case VoronoiMorphology:
{
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
/*
Do the reverse pass through the image.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Read virtual pixels, and authentic pixels, from the same image. We
read using virtual to get virtual pixel handling, but write back
into the same image.
Only the bottom half of the kernel is processed as we move up the image.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
kernel->y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
p+=(image->columns-1)*GetPixelChannels(image);
q+=(image->columns-1)*GetPixelChannels(image);
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
case VoronoiMorphology:
{
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
return(status ? (ssize_t) changed : -1);
}
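/*
Stand-alone sketch (not library code) of the two-pass distance idea used
above, reduced to one dimension. Cells start at 0.0 on feature pixels and a
large value elsewhere; the forward pass takes the minimum of each cell and
its left neighbour plus one, the backward pass repeats this with the right
neighbour, after which every cell holds its distance to the nearest feature.
*/
#if 0
static void DistanceSketch1D(double *cells,size_t length)
{
  size_t
    i;

  /* forward (left-to-right) pass */
  for (i=1; i < length; i++)
    if ((cells[i-1]+1.0) < cells[i])
      cells[i]=cells[i-1]+1.0;
  /* backward (right-to-left) pass */
  for (i=length-1; i > 0; i--)
    if ((cells[i]+1.0) < cells[i-1])
      cells[i-1]=cells[i]+1.0;
}
#endif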
/*
Apply a Morphology by calling one of the above low-level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods
that are based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and the raw low-level implementations (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,const double bias,
ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changes using iterated kernel */
method_changed; /* total count of changes over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MagickPathExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative iterations = infinite (well, almost) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsStringTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iterations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THROUGH */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primitive */
break;
default:
break;
}
/* Apply special methods with special requirements
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
goto error_cleanup;
changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of the input image - but turn it off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
exception);
(void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
MagickTrue,0,0,exception);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
exception);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morphology methods
** erode, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect the kernel; start a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem strange to convert a Correlation into a
** Convolution as the Correlation is the simpler method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if (verbose != MagickFalse) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
goto error_cleanup;
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
this_kernel, bias, exception);
if (verbose != MagickFalse) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if (verbose != MagickFalse && kernel_changed != (size_t)changed)
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if (verbose != MagickFalse && stage_loop < stage_limit)
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"\n%s: Difference with original image",CommandOptionToMnemonic(
MagickMorphologyOptions, method) );
(void) CompositeImage(curr_image,image,DifferenceCompositeOp,
MagickTrue,0,0,exception);
break;
case EdgeMorphology:
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
MagickMorphologyOptions, method) );
(void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
MagickTrue,0,0,exception);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if (verbose != MagickFalse) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
0,0,exception);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes goto's are bad, but they make cleanup a lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
const MorphologyMethod method,const ssize_t iterations,
const KernelInfo *kernel,ExceptionInfo *exception)
{
const char
*artifact;
CompositeOperator
compose;
double
bias;
Image
*morphology_image;
KernelInfo
*curr_kernel;
curr_kernel = (KernelInfo *) kernel;
bias=0.0;
compose = UndefinedCompositeOp; /* use default for method */
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
if ( method == ConvolveMorphology || method == CorrelateMorphology ) {
/* Get the bias value as it will be needed */
artifact = GetImageArtifact(image,"convolve:bias");
if ( artifact != (const char *) NULL) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:bias",artifact);
else
bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
}
/* Scale kernel according to user wishes */
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *) NULL ) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:scale",artifact);
else {
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL)
return((Image *) NULL);
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
}
/* display the (normalized) kernel via stderr */
artifact=GetImageArtifact(image,"morphology:showKernel");
if (IsStringTrue(artifact) != MagickFalse)
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{
ssize_t
parse;
artifact = GetImageArtifact(image,"morphology:compose");
if ( artifact != (const char *) NULL) {
parse=ParseCommandOption(MagickComposeOptions,
MagickFalse,artifact);
if ( parse < 0 )
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
"morphology:compose",artifact);
else
compose=(CompositeOperator)parse;
}
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image,method,iterations,
curr_kernel,compose,bias,exception);
/* Cleanup and Exit */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
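/*
Illustrative usage sketch (not part of the library source): a caller can
steer MorphologyImage() through the image artifacts read above. Assumes
'image' and 'exception' are in scope; the "Blur:0x2" kernel string is an
assumed example argument.
*/
#if 0
{
  Image
    *convolved;

  KernelInfo
    *kernel;

  (void) SetImageArtifact(image,"convolve:bias","50%");
  (void) SetImageArtifact(image,"convolve:scale","!");  /* normalize kernel */
  kernel=AcquireKernelInfo("Blur:0x2",exception);
  if (kernel != (KernelInfo *) NULL)
    {
      convolved=MorphologyImage(image,ConvolveMorphology,1,kernel,exception);
      kernel=DestroyKernelInfo(kernel);
    }
}
#endif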
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree rotations of either 1-D kernels
% or square kernels, and 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
/* rotate the lower kernels in the list first */
if ( kernel->next != (KernelInfo *) NULL)
RotateKernelInfo(kernel->next, angle);
/* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
**
** TODO: expand beyond simple 90 degree rotates, flips and flops
*/
/* Modulus the angle */
angle = fmod(angle, 360.0);
if ( angle < 0 )
angle += 360.0;
if ( 337.5 < angle || angle <= 22.5 )
return; /* Near zero angle - no change! - At least not at this time */
/* Handle special cases */
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
case ManhattanKernel:
case EuclideanKernel:
return;
/* These may be rotatable at non-90 angles in the future */
/* but simply rotating them in multiples of 90 degrees is useless */
case SquareKernel:
case DiamondKernel:
case PlusKernel:
case CrossKernel:
return;
/* These only allow a +/-90 degree rotation (by transpose) */
/* A 180 degree rotation is useless */
case BlurKernel:
if ( 135.0 < angle && angle <= 225.0 )
return;
if ( 225.0 < angle && angle <= 315.0 )
angle -= 180;
break;
default:
break;
}
/* Attempt rotations by 45 degrees -- 3x3 kernels only */
if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
{
if ( kernel->width == 3 && kernel->height == 3 )
{ /* Rotate a 3x3 square by 45 degree angle */
double t = kernel->values[0];
kernel->values[0] = kernel->values[3];
kernel->values[3] = kernel->values[6];
kernel->values[6] = kernel->values[7];
kernel->values[7] = kernel->values[8];
kernel->values[8] = kernel->values[5];
kernel->values[5] = kernel->values[2];
kernel->values[2] = kernel->values[1];
kernel->values[1] = t;
/* rotate non-centered origin */
if ( kernel->x != 1 || kernel->y != 1 ) {
ssize_t x,y;
x = (ssize_t) kernel->x-1;
y = (ssize_t) kernel->y-1;
if ( x == y ) x = 0;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
kernel->x = (ssize_t) x+1;
kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
}
else
perror("Unable to rotate non-3x3 kernel by 45 degrees");
}
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
{ /* Do a transpose of a 1 dimensional kernel,
** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
t = (ssize_t) kernel->width;
kernel->width = kernel->height;
kernel->height = (size_t) t;
t = kernel->x;
kernel->x = kernel->y;
kernel->y = t;
if ( kernel->width == 1 ) {
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
} else {
angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
kernel->angle = fmod(kernel->angle+270.0, 360.0);
}
}
else if ( kernel->width == kernel->height )
{ /* Rotate a square array of values by 90 degrees */
{ register ssize_t
i,j,x,y;
register MagickRealType
*k,t;
k=kernel->values;
for( i=0, x=(ssize_t) kernel->width-1; i<=x; i++, x--)
for( j=0, y=(ssize_t) kernel->height-1; j<y; j++, y--)
{ t = k[i+j*kernel->width];
k[i+j*kernel->width] = k[j+x*kernel->width];
k[j+x*kernel->width] = k[x+y*kernel->width];
k[x+y*kernel->width] = k[y+i*kernel->width];
k[y+i*kernel->width] = t;
}
}
/* rotate the origin - relative to center of array */
{ register ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
}
else
perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
}
if ( 135.0 < angle && angle <= 225.0 )
{
/* For a 180 degree rotation - also known as a reflection.
* This is actually a very common operation!
* Basically all that is needed is a reversal of the kernel data,
* and a reflection of the origin.
*/
MagickRealType
t;
register MagickRealType
*k;
ssize_t
i,
j;
k=kernel->values;
j=(ssize_t) (kernel->width*kernel->height-1);
for (i=0; i < j; i++, j--)
t=k[i], k[i]=k[j], k[j]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->y = (ssize_t) kernel->height - kernel->y - 1;
angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
/* At this point the angle should be between -45 (315) and +45 degrees.
* In the future some form of non-orthogonal angled rotates could be
* performed here, possibly with a linear kernel restriction.
*/
return;
}
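/*
Stand-alone sketch (not library code) of the in-place 90 degree rotation
used above for square kernels: walk the top-left quadrant and cycle each
element through its four rotation positions.
*/
#if 0
static void Rotate90Sketch(double *values,size_t order)
{
  size_t
    i,
    j;

  for (i=0; i < (order/2); i++)
    for (j=i; j < (order-i-1); j++)
    {
      double
        t;

      t=values[i*order+j];                            /* save top */
      values[i*order+j]=values[(order-j-1)*order+i];  /* left -> top */
      values[(order-j-1)*order+i]=
        values[(order-i-1)*order+(order-j-1)];        /* bottom -> left */
      values[(order-i-1)*order+(order-j-1)]=
        values[j*order+(order-i-1)];                  /* right -> bottom */
      values[j*order+(order-i-1)]=t;                  /* top -> right */
    }
}
#endif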
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
const char *geometry)
{
MagickStatusType
flags;
GeometryInfo
args;
SetGeometryInfo(&args);
flags = ParseGeometry(geometry, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/
args.rho *= 0.01, args.sigma *= 0.01;
if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */
args.rho = 1.0;
if ( (flags & SigmaValue) == 0 )
args.sigma = 0.0;
/* Scale/Normalize the input kernel */
ScaleKernelInfo(kernel, args.rho, (GeometryFlags) flags);
/* Add Unity Kernel, for blending with original */
if ( (flags & SigmaValue) != 0 )
UnityAddKernelInfo(kernel, args.sigma);
return;
}
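/*
Illustrative only: per the parsing above, "50%" halves all kernel values,
while a trailing '!' requests normalization first (rho defaults to 1.0 when
absent); a second (sigma) argument, when given, adds that amount of a unity
kernel after scaling (see UnityAddKernelInfo below).
*/
#if 0
ScaleGeometryKernelInfo(kernel,"50%!");  /* normalize, then scale by 0.5 */
#endif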
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel are scaled
% directly using the given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically, kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) the
% kernel will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after any normalization) by this factor; a
% negative factor effectively negates (reverses) the kernel.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
const double scaling_factor,const GeometryFlags normalize_flags)
{
register double
pos_scale,
neg_scale;
register ssize_t
i;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);
/* Normalization of Kernel */
pos_scale = 1.0;
if ( (normalize_flags&NormalizeValue) != 0 ) {
if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
/* non-zero-summing kernel (generally positive) */
pos_scale = fabs(kernel->positive_range + kernel->negative_range);
else
/* zero-summing kernel */
pos_scale = kernel->positive_range;
}
/* Force kernel into a normalized zero-summing kernel */
if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
? kernel->positive_range : 1.0;
neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
? -kernel->negative_range : 1.0;
}
else
neg_scale = pos_scale;
/* finalize scaling_factor for positive and negative components */
pos_scale = scaling_factor/pos_scale;
neg_scale = scaling_factor/neg_scale;
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
if (!IsNaN(kernel->values[i]))
kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;
/* convolution output range */
kernel->positive_range *= pos_scale;
kernel->negative_range *= neg_scale;
/* maximum and minimum values in kernel */
kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;
/* swap kernel settings if user's scaling factor is negative */
if ( scaling_factor < MagickEpsilon ) {
double t;
t = kernel->positive_range;
kernel->positive_range = kernel->negative_range;
kernel->negative_range = t;
t = kernel->maximum;
kernel->maximum = kernel->minimum;
kernel->minimum = t;
}
return;
}
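/*
Worked example (illustrative): a kernel with values {+3,-1} has
positive_range=3 and negative_range=-1. A NormalizeValue ('!') scale with
scaling_factor 1.0 divides by |3+(-1)|=2, giving {+1.5,-0.5} (sum +1.0).
A CorrelateNormalizeValue ('^') scale instead divides the positive and
negative parts separately, giving {+1,-1}, a zero-summing kernel suited to
shape searching with 'Correlate'.
*/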
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a user's 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
const KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
(void) FormatLocaleFile(stderr, "Kernel");
if ( kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
(void) FormatLocaleFile(stderr, " \"%s",
CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) >= MagickEpsilon )
(void) FormatLocaleFile(stderr, "@%lg", k->angle);
(void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
(void) FormatLocaleFile(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
(void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Normalized)\n");
else
(void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
for (i=v=0; v < k->height; v++) {
(void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if (IsNaN(k->values[i]))
(void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
else
(void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), (double) k->values[i]);
(void) FormatLocaleFile(stderr,"\n");
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
UnityAddKernelInfo(kernel->next, scale);
/* Add the scaled unity kernel to the existing kernel */
kernel->values[kernel->x+kernel->y*kernel->width] += scale;
CalcKernelMetaData(kernel); /* recalculate the meta-data */
return;
}
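/*
Worked example (illustrative): UnityAddKernelInfo() simply adds 'scale' at
the kernel origin. Adding 1.0 to the normalized 1-D blur {0.25,0.5,0.25}
gives {0.25,1.5,0.25}, whose convolution yields the original image plus its
blur; combined with kernel scaling this is how the blended soft-blur and
sharpening kernels mentioned above are produced.
*/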
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
register size_t
i;
/* do the other kernels in a multi-kernel list first */
if (kernel->next != (KernelInfo *) NULL)
ZeroKernelNans(kernel->next);
for (i=0; i < (kernel->width*kernel->height); i++)
if (IsNaN(kernel->values[i]))
kernel->values[i]=0.0;
return;
}
|
GB_binop__bxnor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bxnor_int8
// A.*B function (eWiseMult): GB_AemultB__bxnor_int8
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bxnor_int8
// C+=b function (dense accum): GB_Cdense_accumb__bxnor_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int8
// C=scalar+B GB_bind1st__bxnor_int8
// C=scalar+B' GB_bind1st_tran__bxnor_int8
// C=A+scalar GB_bind2nd__bxnor_int8
// C=A'+scalar GB_bind2nd_tran__bxnor_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ~((aij) ^ (bij))
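// Worked example (illustrative, not generated code): for aij = 0x0F and
// bij = 0x35, aij ^ bij = 0x3A and ~(0x3A) = 0xC5, i.e. -59 as int8_t.
#if 0
// standalone sanity check of the BXNOR semantics hard-coded below
#include <stdio.h>
#include <stdint.h>
int main (void)
{
    int8_t a = 0x0F, b = 0x35 ;
    int8_t c = (int8_t) ~(a ^ b) ;   // 0xC5 == -59
    printf ("bxnor(%d,%d) = %d\n", a, b, c) ;
    return (0) ;
}
#endif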
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ~((x) ^ (y)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT8 || GxB_NO_BXNOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bxnor_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bxnor_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__bxnor_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bxnor_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bxnor_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bxnor_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB_bind1st_tran__bxnor_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB_bind2nd_tran__bxnor_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp-parallel-if.c | #include <omp.h>
extern void abort (void);
int
foo (void)
{
return 10;
}
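/* foo() always returns 10, so the first "if" clause below is false and
   that parallel region must run serially (omp_get_num_threads() == 1),
   while the remaining "if" clauses are true and honor num_threads. */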
int main (void)
{
int A = 0;
#pragma omp parallel if (foo () > 10) shared (A)
{
A = omp_get_num_threads ();
}
if (A != 1)
abort ();
#pragma omp parallel if (foo () == 10) num_threads (3) shared (A)
{
A = omp_get_num_threads ();
}
if (A != 3)
abort ();
#pragma omp parallel if (foo () == 10) num_threads (foo ()) shared (A)
{
A = omp_get_num_threads ();
}
if (A != 10)
abort ();
return 0;
}
|
SlicedBasedTraversal.h | /**
* @file SlicedBasedTraversal.h
*
* @date 09 Jan 2019
* @author seckler
*/
#pragma once
#include <algorithm>
#include "autopas/containers/cellPairTraversals/CellPairTraversal.h"
#include "autopas/utils/DataLayoutConverter.h"
#include "autopas/utils/ThreeDimensionalMapping.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
* This class provides the sliced traversal.
*
* The traversal finds the longest dimension of the simulation domain and cuts
* the domain into one slice (block) per thread along this dimension. Slices are
* assigned to the threads in a round-robin fashion. Each thread locks the cells
* on the boundary wall to the previous slice with one lock. This lock is lifted
* as soon as the boundary wall is fully processed.
*
* @tparam ParticleCell The type of cells.
* @tparam PairwiseFunctor The functor that defines the interaction of two particles.
* @tparam dataLayout
* @tparam useNewton3
*/
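// Illustrative usage sketch (hypothetical cell and functor types, not part
// of this header):
//   SlicedBasedTraversal<FullParticleCell<Particle>, MyFunctor,
//                        DataLayoutOption::aos, /*useNewton3*/ true>
//       traversal({16ul, 16ul, 16ul}, &myFunctor,
//                 /*interactionLength*/ 1.2, /*cellLength*/ {1., 1., 1.});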
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
class SlicedBasedTraversal : public CellPairTraversal<ParticleCell> {
public:
/**
* Constructor of the sliced traversal.
* @param dims The dimensions of the cellblock, i.e. the number of cells in x,
* y and z direction.
* @param pairwiseFunctor The functor that defines the interaction of two particles.
* @param interactionLength Interaction length (cutoff + skin).
* @param cellLength cell length.
*/
explicit SlicedBasedTraversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
const double interactionLength, const std::array<double, 3> &cellLength)
: CellPairTraversal<ParticleCell>(dims),
_overlap{},
_dimsPerLength{},
_interactionLength(interactionLength),
_cellLength(cellLength),
_overlapLongestAxis(0),
_sliceThickness{},
locks(),
_dataLayoutConverter(pairwiseFunctor) {
init(dims);
}
/**
* Checks if the traversal is applicable to the current state of the domain.
* @return true iff the traversal can be applied.
*/
bool isApplicable() const override {
return not(dataLayout == DataLayoutOption::cuda) and this->_sliceThickness.size() > 0;
}
/**
* Load Data Layouts required for this Traversal if cells have been set through setCellsToTraverse().
*/
void initTraversal() override {
if (this->_cells) {
auto &cells = *(this->_cells);
#ifdef AUTOPAS_OPENMP
// @todo find a condition on when to use omp or when it is just overhead
#pragma omp parallel for
#endif
for (size_t i = 0; i < cells.size(); ++i) {
_dataLayoutConverter.loadDataLayout(cells[i]);
}
}
}
/**
* Write Data to AoS if cells have been set through setCellsToTraverse().
*/
void endTraversal() override {
if (this->_cells) {
auto &cells = *(this->_cells);
#ifdef AUTOPAS_OPENMP
// @todo find a condition on when to use omp or when it is just overhead
#pragma omp parallel for
#endif
for (size_t i = 0; i < cells.size(); ++i) {
_dataLayoutConverter.storeDataLayout(cells[i]);
}
}
}
protected:
/**
* Resets the cell structure of the traversal.
* @param dims
*/
void init(const std::array<unsigned long, 3> &dims);
/**
* The main traversal of the sliced traversal.
*
* @copydetails C01BasedTraversal::c01Traversal()
*
* @tparam allCells Defines whether to apply the loop body to all cells. By
* default (allCells=false) the traversal skips the last few cells, because they
* are already covered by the base step. Use allCells=false if you plan to use
* the default base step of the traversal on this function; use allCells=true if
* you simply want to iterate over all cells, e.g., to iterate over verlet lists
* saved within the cells. For the sliced step with allCells=false, iteration
* skips the last layer of cells (for _overlap=1) in the x, y and z directions.
*/
template <bool allCells = false, typename LoopBody>
inline void slicedTraversal(LoopBody &&loopBody);
/**
* Overlap of interacting cells. Array allows asymmetric cell sizes.
*/
std::array<unsigned long, 3> _overlap;
private:
/**
* Store ids of dimensions ordered by number of cells per dimensions.
*/
std::array<int, 3> _dimsPerLength;
/**
* Interaction length (cutoff + skin).
*/
double _interactionLength;
/**
* Cell length in CellBlock3D.
*/
std::array<double, 3> _cellLength;
/**
* Overlap of interacting cells along the longest axis.
*/
unsigned long _overlapLongestAxis;
/**
* The number of cells per slice in the dimension that was sliced.
*/
std::vector<unsigned long> _sliceThickness;
std::vector<AutoPasLock> locks;
/**
* Data Layout Converter to be used with this traversal.
*/
utils::DataLayoutConverter<PairwiseFunctor, dataLayout> _dataLayoutConverter;
};
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
inline void SlicedBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::init(
const std::array<unsigned long, 3> &dims) {
for (unsigned int d = 0; d < 3; d++) {
_overlap[d] = std::ceil(_interactionLength / _cellLength[d]);
}
// find longest dimension
auto minMaxElem = std::minmax_element(this->_cellsPerDimension.begin(), this->_cellsPerDimension.end());
_dimsPerLength[0] = (int)std::distance(this->_cellsPerDimension.begin(), minMaxElem.second);
_dimsPerLength[2] = (int)std::distance(this->_cellsPerDimension.begin(), minMaxElem.first);
_dimsPerLength[1] = 3 - (_dimsPerLength[0] + _dimsPerLength[2]);
_overlapLongestAxis = _overlap[_dimsPerLength[0]];
// split domain across its longest dimension
auto numSlices = (size_t)autopas_get_max_threads();
auto minSliceThickness = this->_cellsPerDimension[_dimsPerLength[0]] / numSlices;
if (minSliceThickness < _overlapLongestAxis + 1) {
minSliceThickness = _overlapLongestAxis + 1;
numSlices = this->_cellsPerDimension[_dimsPerLength[0]] / minSliceThickness;
AutoPasLog(debug, "Sliced traversal only using {} threads because the number of cells is too small.", numSlices);
}
_sliceThickness.clear();
// abort if domain is too small -> cleared _sliceThickness array indicates non applicability
if (numSlices < 1) return;
_sliceThickness.insert(_sliceThickness.begin(), numSlices, minSliceThickness);
auto rest = this->_cellsPerDimension[_dimsPerLength[0]] - _sliceThickness[0] * numSlices;
for (size_t i = 0; i < rest; ++i) ++_sliceThickness[i];
// decreases last _sliceThickness by _overlapLongestAxis to account for the way we handle base cells
_sliceThickness.back() -= _overlapLongestAxis;
locks.resize((numSlices - 1) * _overlapLongestAxis);
}
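// Worked example (illustrative): 23 cells along the longest dimension, 4
// threads, _overlapLongestAxis == 2:
//   minSliceThickness = 23 / 4 = 5 (>= overlap + 1, so 4 slices remain),
//   rest = 23 - 4 * 5 = 3  ->  _sliceThickness = {6, 6, 6, 5},
//   back() -= 2            ->  _sliceThickness = {6, 6, 6, 3},
//   locks.size() = (4 - 1) * 2 = 6.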
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
template <bool allCells, typename LoopBody>
void SlicedBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::slicedTraversal(LoopBody &&loopBody) {
using std::array;
auto numSlices = _sliceThickness.size();
// 0) check if applicable
std::array<size_t, 2> overLapps23{_overlap[_dimsPerLength[1]], _overlap[_dimsPerLength[2]]};
if (allCells) {
overLapps23 = {0ul, 0ul};
_sliceThickness.back() += _overlapLongestAxis;
}
#ifdef AUTOPAS_OPENMP
// although every thread gets exactly one iteration (=slice) this is faster than a normal parallel region
#pragma omp parallel for schedule(static, 1) num_threads(numSlices)
#endif
for (size_t slice = 0; slice < numSlices; ++slice) {
array<unsigned long, 3> myStartArray{0, 0, 0};
for (size_t i = 0; i < slice; ++i) {
myStartArray[_dimsPerLength[0]] += _sliceThickness[i];
}
// all but the first slice need to lock their starting layers.
if (slice > 0) {
for (unsigned long i = 0ul; i < _overlapLongestAxis; i++) {
locks[((slice - 1) * _overlapLongestAxis) + i].lock();
}
}
const auto lastLayer = myStartArray[_dimsPerLength[0]] + _sliceThickness[slice];
for (unsigned long dimSlice = myStartArray[_dimsPerLength[0]]; dimSlice < lastLayer; ++dimSlice) {
// at the last layers request lock for the starting layer of the next
// slice. Does not apply for the last slice.
if (slice != numSlices - 1 && dimSlice >= lastLayer - _overlapLongestAxis) {
locks[((slice + 1) * _overlapLongestAxis) - (lastLayer - dimSlice)].lock();
}
for (unsigned long dimMedium = 0; dimMedium < this->_cellsPerDimension[_dimsPerLength[1]] - overLapps23[0];
++dimMedium) {
for (unsigned long dimShort = 0; dimShort < this->_cellsPerDimension[_dimsPerLength[2]] - overLapps23[1];
++dimShort) {
array<unsigned long, 3> idArray = {};
idArray[_dimsPerLength[0]] = dimSlice;
idArray[_dimsPerLength[1]] = dimMedium;
idArray[_dimsPerLength[2]] = dimShort;
loopBody(idArray[0], idArray[1], idArray[2]);
}
}
// at the end of the first layers release the lock
if (slice > 0 && dimSlice < myStartArray[_dimsPerLength[0]] + _overlapLongestAxis) {
const unsigned long index = ((slice - 1) * _overlapLongestAxis) + (dimSlice - myStartArray[_dimsPerLength[0]]);
locks[index].unlock();
// if lastLayer is reached within overlap area, unlock all following locks
if (dimSlice == lastLayer - 1) {
for (unsigned long i = dimSlice + 1; i < myStartArray[_dimsPerLength[0]] + _overlapLongestAxis; ++i) {
const unsigned long index = ((slice - 1) * _overlapLongestAxis) + (i - myStartArray[_dimsPerLength[0]]);
locks[index].unlock();
}
}
} else if (slice != numSlices - 1 && dimSlice == lastLayer - 1) {
// clearing of the locks set on the last layers of each slice
for (size_t i = (slice * _overlapLongestAxis); i < (slice + 1) * _overlapLongestAxis; ++i) {
locks[i].unlock();
}
}
}
}
if (allCells) {
_sliceThickness.back() -= _overlapLongestAxis;
}
}
} // namespace autopas
|
omp.h | // Copyright 2021 ETH Zurich and University of Bologna.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <stdint.h>
#include "eu.h"
#include "kmp.h"
#include "snrt.h"
//================================================================================
// debug
//================================================================================
#define OPENMP_PROFILE
#ifdef OPENMP_PROFILE
#define OMP_PROF(X) \
do { \
{ X; } \
} while (0)
#else
#define OMP_PROF(X) \
do { \
} while (0)
#endif
#ifdef OMP_DEBUG_LEVEL
#include "encoding.h"
#include "printf.h"
#define _OMP_PRINTF(...) \
if (1) { \
printf("[omp] "__VA_ARGS__); \
}
#define OMP_PRINTF(d, ...) \
if (OMP_DEBUG_LEVEL >= d) { \
_OMP_PRINTF(__VA_ARGS__); \
}
#else
#define OMP_PRINTF(d, ...)
#endif
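// Example (illustrative): with OMP_DEBUG_LEVEL >= 2, the call below prints
// "[omp] entering region with 8 threads"; at a lower level (or when
// OMP_DEBUG_LEVEL is undefined) it compiles away entirely:
//   OMP_PRINTF(2, "entering region with %d threads\n", 8);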
//================================================================================
// Macros
//================================================================================
#ifdef OMPSTATIC_NUMTHREADS
#define _OMP_T const omp_t
#define _OMP_TEAM_T const omp_team_t
#else
#define _OMP_T omp_t
#define _OMP_TEAM_T omp_team_t
#endif
//================================================================================
// types
//================================================================================
typedef struct {
char nbThreads;
#ifndef OMPSTATIC_NUMTHREADS
int loop_epoch;
int loop_start;
int loop_end;
int loop_incr;
int loop_chunk;
int loop_is_setup;
int core_epoch[16]; // for dynamic scheduling
#endif
} omp_team_t;
typedef struct {
#ifndef OMPSTATIC_NUMTHREADS
omp_team_t plainTeam;
int numThreads;
int maxThreads;
#else
const omp_team_t plainTeam;
const int numThreads;
const int maxThreads;
#endif
/**
* @brief Pointer to the barrier register used for synchronization, e.g. with
* #pragma omp barrier
*
*/
struct snrt_barrier *kmpc_barrier;
/**
* @brief Usually __kmpc_fork_call would malloc space for the arguments
* passed to it. This is too slow for our case, so we reserve a chunk of
* argument storage in TCDM and use that instead. This limits the maximum
* number of arguments.
*/
_kmp_ptr32 *kmpc_args;
} omp_t;
#ifdef OPENMP_PROFILE
typedef struct {
uint32_t fork_oh;
} omp_prof_t;
extern omp_prof_t *omp_prof;
#endif
#ifndef OMPSTATIC_NUMTHREADS
extern __thread omp_t volatile *omp_p;
#else
extern omp_t omp_p;
#endif
//================================================================================
// exported
//================================================================================
void omp_init(void);
unsigned snrt_omp_bootstrap(uint32_t core_idx);
void partialParallelRegion(int32_t argc, void *data,
void (*fn)(void *, uint32_t), int num_threads);
#ifdef OPENMP_PROFILE
void omp_print_prof(void);
extern omp_prof_t *omp_prof;
#endif
//================================================================================
// inlines
//================================================================================
#ifndef OMPSTATIC_NUMTHREADS
static inline omp_t *omp_getData() { return omp_p; }
static inline omp_team_t *omp_get_team(omp_t *_this) {
return &_this->plainTeam;
}
#else
static inline const omp_t *omp_getData() { return &omp_p; }
static inline const omp_team_t *omp_get_team(const omp_t *_this) {
return &_this->plainTeam;
}
#endif
static inline unsigned omp_get_thread_num(void) {
return snrt_cluster_core_idx();
}
static inline void __attribute__((always_inline))
parallelRegionExec(int32_t argc, void *data, void (*fn)(void *, uint32_t),
int num_threads) {
// Now that the team is ready, wake up slaves
(void)eu_dispatch_push(fn, argc, data, num_threads);
eu_run_empty(snrt_cluster_core_idx());
}
static inline void __attribute__((always_inline))
parallelRegion(int32_t argc, void *data, void (*fn)(void *, uint32_t),
int num_threads) {
partialParallelRegion(argc, data, fn, num_threads);
}
|
math.h | //////////////////////////////////////////////////////////////////////////
// Software License Agreement (BSD License) //
// //
// Copyright (c) 2009 //
// Engin Tola //
// web : http://cvlab.epfl.ch/~tola //
// email : engin.tola@epfl.ch //
// //
// All rights reserved. //
// //
// Redistribution and use in source and binary forms, with or without //
// modification, are permitted provided that the following conditions //
// are met: //
// //
// * Redistributions of source code must retain the above copyright //
// notice, this list of conditions and the following disclaimer. //
// * Redistributions in binary form must reproduce the above //
// copyright notice, this list of conditions and the following //
// disclaimer in the documentation and/or other materials provided //
// with the distribution. //
// * Neither the name of the EPFL nor the names of its //
// contributors may be used to endorse or promote products derived //
// from this software without specific prior written permission. //
// //
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS //
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT //
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS //
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE //
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, //
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, //
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; //
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER //
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT //
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN //
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE //
// POSSIBILITY OF SUCH DAMAGE. //
// //
// See licence.txt file for more details //
//////////////////////////////////////////////////////////////////////////
#ifndef KUTILITY_MATH_H
#define KUTILITY_MATH_H
#include "kutility/general.h"
// #include "kutility/linear_algebra.h"
#include "kutility/convolution.h"
namespace kutility
{
template<typename T>
inline T distance( T a[2], T b[2] )
{
T d0 = a[0]-b[0];
T d1 = a[1]-b[1];
return sqrt( d0*d0+d1*d1 );
}
template<typename T> inline
void shift_array_right( T* arr, int sz, int start )
{
for( int i=sz-2; i>=start; i-- )
{
arr[i+1] = arr[i];
}
}
/// creates a 1D gaussian filter with N(mean,sigma).
inline void gaussian_1d(float* fltr, int fsz, float sigma, float mean )
{
assert(fltr != NULL);
int sz = (fsz-1)/2;
int counter=-1;
float sum = 0.0;
float v = 2*sigma*sigma;
for( int x=-sz; x<=sz; x++ )
{
counter++;
fltr[counter] = exp((-(x-mean)*(x-mean))/v);
sum += fltr[counter];
}
if( sum != 0 )
{
for( int x=0; x<fsz; x++ )
fltr[x] /= sum;
}
}
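// worked example (illustrative): gaussian_1d(fltr, 5, 0.5f, 0) first fills
// exp(-x*x/0.5) for x = -2..2, i.e. {0.000335, 0.135335, 1, 0.135335,
// 0.000335}; dividing by the sum (1.271341) yields approximately
// {0.000264, 0.106451, 0.786571, 0.106451, 0.000264}.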
/// creates a 2D gaussian filter with N(mean,sigma).
inline float* gaussian_2d(int fsz, float sigma, float mn)
{
int fltr_size = fsz * fsz;
float* fltr = new float[fltr_size];
int sz = (fsz-1)/2;
int y,x;
int counter=-1;
float sum=0;
float v = 2*sigma*sigma;
for( y=-sz; y<=sz; y++ )
{
for( x=-sz; x<=sz; x++ )
{
counter++;
fltr[counter] = exp((-(x-mn)*(x-mn)-(y-mn)*(y-mn))/v); // fix: center the x term on the mean as well
sum += fltr[counter];
}
}
if( sum != 0 )
{
for( x=0; x<fltr_size; x++ )
fltr[x] /= sum;
}
return fltr;
}
template<class T1, class T2> inline
double normalized_cross_correlation( T1* a, T2* b, int sz )
{
double mean_a = 0;
double mean_b = 0;
for( int i=0; i<sz; i++ )
{
mean_a += a[i];
mean_b += b[i];
}
mean_a /= sz;
mean_b /= sz;
double var_a = 0;
double var_b = 0;
double var_ab = 0;
double a_part = 0;
double b_part = 0;
for( int i=0; i<sz; i++ )
{
a_part = ( a[i] - mean_a );
b_part = ( b[i] - mean_b );
var_a += a_part * a_part;
var_b += b_part * b_part;
var_ab += a_part * b_part;
}
var_a /= sz;
var_b /= sz;
var_ab /= sz;
if( var_a != 0 && var_b != 0 )
return var_ab / sqrt(var_a * var_b);
else if ( var_a == 0 && var_b == 0 )
return 1;
else
return -1;
}
inline float pi() { return atan2( 0.0f, -1.0f ); }
/// Applies a 2d gaussian blur of sigma std to the input array. if
/// kernel_size is not set or it is set to 0, then it is taken as
/// 3*sigma and if it is set to an even number, it is incremented
/// to be an odd number. if in_place=true, then T1 must be equal
/// to T2 naturally.
template<class T1, class T2> inline
T1* blur_gaussian_2d( T2* array, int rn, int cn, float sigma, int kernel_size=0, bool in_place=false )
{
T1* out = NULL;
if( in_place )
out = (T1*)array;
else
out = type_cast<T1,T2>(array,rn*cn);
if( kernel_size == 0 )
kernel_size = (int)(3*sigma);
if( kernel_size%2 == 0 ) kernel_size++; // kernel size must be odd
if( kernel_size < 3 ) kernel_size= 3; // kernel size cannot be smaller than 3
float* kernel = new float[kernel_size];
gaussian_1d(kernel, kernel_size, sigma, 0);
// !! apply the filter separately
convolve_sym( out, rn, cn, kernel, kernel_size );
// conv_horizontal( out, rn, cn, kernel, kernel_size);
// conv_vertical ( out, rn, cn, kernel, kernel_size);
deallocate(kernel);
return out;
}
/// inserts a portion of the source to the destination.
template<class Td, class Ts>
void insert( Td* dst, int dcn, int dymin, int dymax, int dxmin, int dxmax,
Ts* src, int scn, int symin=-1, int symax=-1, int sxmin=-1, int sxmax=-1 )
{
int xsz = dxmax - dxmin;
int ysz = dymax - dymin;
if( symin == -1 && symax == -1 && sxmin == -1 && sxmax == -1 )
{
sxmin = 0;
symin = 0;
sxmax = scn;
symax = ysz;
}
if( ysz != symax - symin ) error("insert: intervals must match");
if( xsz != sxmax - sxmin ) error("insert: intervals must match");
for( int y=0; y<ysz; y++ )
{
for( int x=0; x<xsz; x++ )
{
dst[ (dymin+y)*dcn+(dxmin+x) ] = (Td)( src[ (symin+y)*scn+(sxmin+x) ] );
}
}
}
/// swaps the values of y and x
template<class T> inline
void swap(T &y, T &x)
{
T tmp = x;
x = y;
y = tmp;
}
/// inverts a boolean array: 1->0 & 0->1.
inline bool* invert( bool* data, int sz, bool in_place=true)
{
bool* out=NULL;
if( in_place ) out = data;
else out = new bool[sz];
for(int i=0; i<sz; i++)
out[i] = !data[i];
return out;
}
/// extracts a square patch of patch_width x patch_width from the
/// image around the point ry,rx ;
/// returns true if all the pixels are within the image and false
/// if some of the pixels are outside the image.
template<class T1, class T2>
bool extract_patch( T1* dst, T1* src, int h, int w, T2 ry, T2 rx, int patch_width )
{
float w_2 = patch_width/2;
float x,y;
float yy, xx;
bool out_of_image = true;
int index=0;
for( y=0; y<patch_width; y++ )
{
for( x=0; x<patch_width; x++ )
{
yy = ry + y - w_2;
xx = rx + x - w_2;
if( is_outside( xx, 0, w, yy, 0, h ) )
{
dst[index] = 0;
out_of_image = false;
}
else
{
dst[ index ] = (T1)bilinear_interpolation( src, w, xx, yy );
}
index++;
}
}
return out_of_image;
}
/// extracts a square patch of patch_width x patch_width from a
/// rotated image around the point ry,rx ; rotation_angle is in
/// radians. returns true if all the pixels are within the image
/// and false if some of the pixels are outside the image.
template<class T1, class T2, class T3>
bool extract_rotated_patch( T1* dst, T1* src, int h, int w, T2 ry, T2 rx, int patch_width, T3 rotation_angle )
{
int w_2 = patch_width/2;
float kos = cos( rotation_angle );
float zin = sin( rotation_angle );
int yp, xp;
float yu, xu;
float y, x;
int index = 0;
bool out_of_image = true;
for( yp=0; yp<patch_width; yp++ )
{
for( xp=0; xp<patch_width; xp++ )
{
xu = xp-w_2;
yu = yp-w_2;
x = kos * xu - zin * yu + rx;
y = zin * xu + kos * yu + ry;
if( is_inside( x, 0, w, y, 0, h ) )
{
dst[ index ] = (T1)bilinear_interpolation(src, w, x, y);
}
else
{
dst[ index ] = 0;
out_of_image = false;
}
index++;
}
}
return out_of_image;
}
/// extracts a portion of the matrix [ymin:ymax) & [xmin:xmax)
/// and returns the result.
/// note: you should deallocate the dst memory yourself
/// note: upper boundaries are not included in the output matrix
template<class T>
T* extract( T* src, int xmin, int xmax, int ymin, int ymax, int matw)
{
int xsz = xmax - xmin;
int ysz = ymax - ymin;
T* dst = new T[ysz*xsz];
int yy=0;
int xx=0;
for( int y=ymin; y<ymax; y++ )
{
xx=0;
for( int x=xmin; x<xmax; x++ )
{
dst[yy*xsz+xx] = src[y*matw+x];
xx++;
}
yy++;
}
return dst;
}
/// extracts a portion of the matrix [ymin:ymax) & [xmin:xmax)
/// and returns the result.
/// note: you should deallocate the dst memory yourself
/// note: upper boundaries are not included in the output matrix
template<class T>
T* crop( T* src, int h, int w, int center_y, int center_x, int patch_rad)
{
int pw = 2*patch_rad+1;
int psz = pw*pw;
T* dst = new T[psz];
initialize( dst, psz, 0);
int yy, xx;
for( yy = -patch_rad; yy<=patch_rad; yy++ )
{
if( yy + center_y < 0 || yy + center_y >= h ) // fix: row h is out of bounds
continue;
for( xx = -patch_rad; xx<=patch_rad; xx++ )
{
if( xx+center_x < 0 || xx+center_x >= w ) // fix: column w is out of bounds
continue;
dst[ ( yy+patch_rad ) * pw + xx + patch_rad ] = src[ (yy+center_y)*w+xx+center_x ];
}
}
return dst;
}
/// extracts a portion of the matrix [ymin:ymax) & [xmin:xmax)
/// and returns the result in the given pointer.
/// note: you should allocate the dst memory yourself
/// note: upper boundaries are not included in the output matrix
template<class T>
void extract( T* dst, T* src, int xmin, int xmax, int ymin, int ymax, int mw)
{
int xsz = xmax - xmin;
int yy=0;
int xx=0;
for( int y=ymin; y<ymax; y++ )
{
xx=0;
for( int x=xmin; x<xmax; x++ )
{
dst[yy*xsz+xx] = src[y*mw+x];
xx++;
}
yy++;
}
}
/// extracts a portion of the matrix [ymin:ymax) & [xmin:xmax)
/// and returns the result in the given pointer.
/// note: you should deallocate the dst memory yourself
/// note: upper boundaries are not included in the output matrix
template<class T>
T** extract( T** src, int xmin, int xmax, int ymin, int ymax)
{
int xsz = xmax - xmin;
int ysz = ymax - ymin;
T** dst = allocate<T>(ysz,xsz);
int yy=0;
int xx=0;
for( int y=ymin; y<ymax; y++ )
{
xx=0;
for( int x=xmin; x<xmax; x++ )
{
dst[yy][xx] = src[y][x];
xx++;
}
yy++;
}
return dst;
}
/// extracts a portion of the matrix [ymin:ymax) & [xmin:xmax)
/// and returns the result in the given pointer.
/// note: you should allocate the dst memory yourself
/// note: upper boundaries are not included in the output matrix
template<class T>
void extract( T** dst, T** src, int xmin, int xmax, int ymin, int ymax)
{
int yy=0;
int xx=0;
for( int y=ymin; y<ymax; y++ )
{
xx=0;
for( int x=xmin; x<xmax; x++ )
{
dst[yy][xx] = src[y][x];
xx++;
}
yy++;
}
}
/// rounds a number: if the fractional part is bigger than 0.5, rounds up, else down
template<class T> inline
T round( T x )
{
T fx = floor(x);
if( x-fx > 0.5 ) return fx+1;
else return fx;
}
/// rounds an array of numbers: if the fractional part is bigger than 0.5,
/// rounds up, else down.
template<class T> inline
T* round( T* x, int sz, bool in_place = false )
{
T* out;
if( in_place ) out = x;
else out = allocate<T>(sz);
for( int i=0; i<sz; i++ )
{
out[i] = round(x[i]);
}
return out;
}
// /// filters the image with a filter.
// float* filter_2d(float* &im, int r, int c, float* filter, int fr, int fc, bool in_place=false);
/// r & c is the size of the image and filter has fr, fc size. it
/// supports in place filtering. it uses simple for loops and does
/// not employs a fast convolution implementation. beware: it is
/// not equal to convolution - it does not invert the filter.
template<class T>
T* filter_2d(T* &im, int r, int c, T* filter, int fr, int fc, bool in_place)
{
int y,x;
int yy,xx;
int ya,xa;
int yc, yac;
int fr_half = fr/2;
int fc_half = fc/2;
int sz = r*c;
T* out = allocate<T>(sz);
initialize(out, sz, 0);
T sum;
int findex=0;
for( y=0; y<r; y++ )
{
ya = y - fr_half-1;
yc = y*c;
for( x=0; x<c; x++ )
{
sum = 0;
xa = x-fc_half-1;
findex=0;
for( yy=0; yy<fr; yy++ )
{
ya++;
if( is_outside(ya, 0, r) )
{
findex += fc;
continue;
}
yac = ya*c;
for( xx=0; xx<fc; xx++ )
{
xa++;
if( is_outside(xa,0,c) )
{
findex++;
continue;
}
sum += im[yac+xa]*filter[findex++];
}
xa -= fc;
}
ya -= fr;
out[yc++] = sum;
}
}
if( in_place )
{
delete []im;
im = out;
}
return out;
}
/// returns an array filled with ones.
template<class T> inline
T* ones (int r)
{
T* data = allocate<T>(r);
for( int i=0; i<r; i++ )
data[i] = 1;
return data;
}
/// returns an array filled with zeroes.
template<class T> inline
T* zeros(int r)
{
T* data = allocate<T>(r);
memset( data, 0, sizeof(T)*r );
return data;
}
/// computes the square of a number and returns it.
template<class T> inline
T square(T a)
{
return a*a;
}
/// computes the square of an array. if in_place is enabled, the
/// result is returned in the array arr.
template<class T> inline
T* square(T* arr, int sz, bool in_place=false)
{
T* out;
if( in_place ) out = arr;
else out = allocate<T>(sz);
for( int i=0; i<sz; i++ )
out[i] = arr[i]*arr[i];
return out;
}
/// computes the p power of a number and returns it.
template<class T1, class T2> inline
T1 power(T1 a, T2 p)
{
return (T1)pow(a,p);
}
/// computes the p power of an array. if in_place is enabled, the
/// result is returned in the array arr.
template<class T1, class T2> inline
T1* power(T1* arr, int sz, T2 p, bool in_place=false)
{
T1* out;
if( in_place ) out = arr;
else out = allocate<T1>(sz);
for( int i=0; i<sz; i++ )
out[i] = power(arr[i],p);
return out;
}
/// returns the theta component of a point in the range -PI to PI.
template<class T> inline
float angle(T x, T y)
{
return atan2( (float)y, (float)x );
}
/// returns the theta component of a point array in the range -PI to PI.
template<class T> inline
float* angle(T* x, T* y, int lsz)
{
float* ang = allocate<float>(lsz);
for( int k=0; k<lsz; k++ )
{
ang[k] = angle<T>(x[k],y[k]);
}
return ang;
}
/// returns the radial component of a point.
template<class T> inline
T magnitude(T x, T y)
{
return sqrt(x*x+y*y);
}
/// computes the radial component of a 2D array and returns the
/// result in a REAL array. the x&y coordinates are given in
/// separate 1D arrays together with their size.
template<class T> inline
T* magnitude(T* arrx, T* arry, int lsz)
{
T* mag = allocate<T>(lsz);
for( int k=0; k<lsz; k++ )
{
mag[k] = sqrt( arrx[k]*arrx[k] + arry[k]*arry[k] );
}
return mag;
}
/// Converts the given cartesian coordinates of a point to polar
/// ones.
template<class T> inline
void cartesian2polar(T x, T y, float &r, float &th)
{
r = magnitude(x,y);
th = angle(x,y);
}
/// Converts the given polar coordinates of a point to cartesian
/// ones.
template<class T1, class T2> inline
void polar2cartesian(T1 r, T1 t, T2 &y, T2 &x)
{
x = (T2)( r * cos( t ) );
y = (T2)( r * sin( t ) );
}
/// returns an interval list that starts at "st" and ends at "en"
/// having "level_no" levels. The list has entries like :
/// [ s1 e1 ;
/// s2 e2 ;
/// ....
/// sn en ] -> s(i+1) = e(i)
/// the function uses up to 4 digits of precision if not specified
template<class T> inline
T** interval( T st, T en, int levels, int prec=4)
{
T** interval_list = allocate<T>(levels, 2);
float step = ((float)(en-st))/levels;
for( int i=0; i<levels; i++ )
{
interval_list[i][0] = i*step+st;
interval_list[i][1] = i*step+st+step;
}
return interval_list;
}
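// worked example (illustrative): interval(0.0f, 10.0f, 5) has step 2 and
// returns the rows {0,2}, {2,4}, {4,6}, {6,8}, {8,10}, so s(i+1) = e(i).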
/// computes the gradient of an image and returns the result in
/// pointers to REAL.
template <class T> inline
void gradient(T* im, int h, int w, T* dy, T* dx)
{
assert( dx != NULL );
assert( dy != NULL );
for( int y=0; y<h; y++ )
{
int yw = y*w;
for( int x=0; x<w; x++ )
{
int ind = yw+x;
// dx
if( x>0 && x<w-1 ) dx[ind] = ((T)im[ind+1]-(T)im[ind-1])/2.0;
if( x==0 ) dx[ind] = ((T)im[ind+1]-(T)im[ind]);
if( x==w-1 ) dx[ind] = ((T)im[ind ]-(T)im[ind-1]);
//dy
if( y>0 && y<h-1 ) dy[ind] = ((T)im[ind+w]-(T)im[ind-w])/2.0;
if( y==0 ) dy[ind] = ((T)im[ind+w]-(T)im[ind]);
if( y==h-1 ) dy[ind] = ((T)im[ind] -(T)im[ind-w]);
}
}
}
template<class T> inline
T is_positive( T number )
{
if( number > 0 ) return number;
else return (T)(0);
}
template<class T> inline
T* layered_gradient( T* data, int h, int w, int layer_no=8 )
{
int data_size = h * w;
T* layers = zeros<T>(layer_no * data_size);
// smooth the data matrix
T* bdata = blur_gaussian_2d<T,T>( data, h, w, 0.5, 5, false);
T *dx = new T[data_size];
T *dy = new T[data_size];
gradient(bdata, h, w, dy, dx);
deallocate( bdata );
#if defined(WITH_OPENMP)
#pragma omp parallel for
#endif
for( int l=0; l<layer_no; l++ )
{
float angle = 2*l*pi()/layer_no;
float kos = cos( angle );
float zin = sin( angle );
T* layer_l = layers + l*data_size;
for( int index=0; index<data_size; index++ )
{
float value = kos * dx[ index ] + zin * dy[ index ];
if( value > 0 ) layer_l[index] = value;
else layer_l[index] = 0;
}
}
deallocate(dy);
deallocate(dx);
return layers;
}
/// note: unlike the version above, 'data' is not modified; it is first copied into the workspace
template<class T> inline
void layered_gradient( T* data, int h, int w, int layer_no, T* layers, T* workspace=0, int lwork=0 )
{
int data_size = h * w;
assert(layers!=NULL);
memset(layers,0,sizeof(T)*data_size*layer_no);
bool empty=false;
T* work=workspace; // fix: actually use the caller-provided workspace
if( work == NULL || lwork < 3*data_size ) {
work = new T[3*data_size];
empty=true;
}
// // smooth the data matrix
// T* bdata = blur_gaussian_2d<T,T>( data, h, w, 0.5, 5, false);
float kernel[5]; gaussian_1d(kernel, 5, 0.5, 0);
memcpy( work, data, sizeof(T)*data_size);
convolve_sym( work, h, w, kernel, 5 );
T *dx = work+data_size;
T *dy = work+2*data_size;
gradient( work, h, w, dy, dx );
#if defined(WITH_OPENMP)
#pragma omp parallel for
#endif
for( int l=0; l<layer_no; l++ )
{
float angle = 2*l*pi()/layer_no;
float kos = cos( angle );
float zin = sin( angle );
T* layer_l = layers + l*data_size;
for( int index=0; index<data_size; index++ )
{
float value = kos * dx[ index ] + zin * dy[ index ];
if( value > 0 ) layer_l[index] = value;
else layer_l[index] = 0;
}
}
if( empty ) delete []work;
}
/// computes the bilinearly interpolated value of the point (x,y).
template<class T1, class T2> inline
float bilinear_interpolation(T1* arr, int w, T2 x, T2 y)
{
int mnx = (int)floor( x );
int mny = (int)floor( y );
int mxx = (int) ceil( x );
int mxy = (int) ceil( y );
double alfa = mxx - x;
double beta = mxy - y;
if( alfa < 0.001 ) alfa = 0;
if( beta < 0.001 ) beta = 0;
int mnyw = mny * w;
int mxyw = mxy * w;
if( alfa < 0.001 ) return float(beta * arr[mnyw+mxx] + (1-beta) * arr[mxyw+mxx]);
if( alfa > 0.999 ) return float(beta * arr[mnyw+mnx] + (1-beta) * arr[mxyw+mnx]);
if( beta < 0.001 ) return float(alfa * arr[mxyw+mnx] + (1-alfa) * arr[mxyw+mxx]);
if( beta > 0.999 ) return float(alfa * arr[mnyw+mnx] + (1-alfa) * arr[mnyw+mxx]);
return float( beta*(alfa * arr[mnyw+mnx] + (1-alfa)*arr[mnyw+mxx] )
+(1-beta)*(alfa * arr[mxyw+mnx] + (1-alfa)*arr[mxyw+mxx] ) );
}
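// worked example (illustrative): for the 2x2 array {10, 20, 30, 40} with
// w = 2, bilinear_interpolation(arr, 2, 0.25, 0.5) gives alfa = 0.75 and
// beta = 0.5, returning 0.5*(0.75*10 + 0.25*20) + 0.5*(0.75*30 + 0.25*40)
// = 22.5.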
/// divides the elements of the array with "norm". function
/// supports in-place operations in which case the result is casted
/// to the input type; default is non-in-place.
template<class T1, class T2> inline
T2* normalize(T1* data, int sz, T2 norm, bool in_place=false)
{
assert( norm != 0.0 );
float inv_norm = 1.0/norm;
if( in_place )
{
for( int i=0; i<sz; i++ )
{
data[i] = (T1)(data[i]*inv_norm);
}
return NULL;
}
else
{
T2* new_data = allocate<T2>(sz);
for( int i=0; i<sz; i++ )
{
new_data[i] = (T2)(data[i]*inv_norm);
}
return new_data;
}
}
template<typename T> inline
void diff( const T* a, const T* b, const int sz, T* a_m_b)
{
for( int k=0; k<sz; k++ )
a_m_b[k] = a[k] - b[k];
}
/// computes the difference of two arrays and returns the resulting
/// array. function supports in place operation, and returns the
/// result in the "a" array if in place is enabled.
template<class T> inline
T* diff( T* a, const T* b, const int sz, bool in_place=false)
{
T* d=NULL;
if( in_place ) d = a;
else d = allocate<T>(sz);
for( int k=0; k<sz; k++ )
{
d[k] = a[k]-b[k];
}
return d;
}
/// computes the absolute difference of two arrays and returns the
/// resulting array : d = |a-b|. function supports in place
/// operation, and returns the result in the "a" array if in place
/// is enabled.
template<class T> inline
T* absdiff( T* a, T* b, int sz, bool in_place=false)
{
T* d=NULL;
if( in_place ) d = a;
else d = allocate<T>(sz);
for( int k=0; k<sz; k++ )
{
d[k] = (T)fabs(a[k]-b[k]);
}
return d;
}
/// computes the absolute difference of two matrices and returns
/// the resulting matrix : d = |a-b|. function supports in place
/// operation, and returns the result in the "a" matrix if in place
/// is enabled.
template<class T> inline
T** absdiff( T** a, T** b, int ysz, int xsz, bool in_place=false)
{
T** d=NULL;
if( in_place ) d = a;
else d = allocate<T>(ysz,xsz);
for( int y=0; y<ysz; y++ )
{
for( int x=0; x<xsz; x++ )
{
d[y][x] = fabs(a[y][x]-b[y][x]);
}
}
return d;
}
/// computes the l1norm of an array: sum_i( |a(i)| )
template<class T> inline
T l1norm( T* a, int sz)
{
T norm=0;
for( int k=0; k<sz; k++ )
norm += abs( a[k] );
return norm; // fix: the result was computed but never returned
}
/// computes the l1norm of the difference of two arrays: sum_i( a(i)-b(i) )
template<class T> inline
T l1norm( T* a, T* b, int sz)
{
T norm=0;
for( int k=0; k<sz; k++ )
norm += abs(a[k]-b[k]);
return norm;
}
/// computes the l2norm of an array: [ sum_i( [a(i)]^2 ) ]^0.5
template<class T> inline
float l2norm( T* a, int sz)
{
float norm=0;
for( int k=0; k<sz; k++ )
norm += a[k]*a[k];
return sqrt(norm);
}
/// computes the l2norm of the difference of two arrays: [ sum_i( [a(i)-b(i)]^2 ) ]^0.5
template<class T1, class T2> inline
float l2norm( T1* a, T2* b, int sz)
{
float norm=0;
for( int i=0; i<sz; i++ )
{
norm += square( (float)a[i] - (float)b[i] );
}
norm = sqrt( norm );
return norm;
}
template<class T> inline
float l2norm( T y0, T x0, T y1, T x1 )
{
float d0 = x0 - x1;
float d1 = y0 - y1;
return sqrt( d0*d0 + d1*d1 );
}
/// computes the l2 norm of the difference of two arrays by
/// weighting regions of them. if reg is set to -1 (or not
/// specified) each difference is weighted. if reg is not -1,
/// arrays are assumed to be composed of reg segments of length sz/reg
/// and the weighting is applied to these segments. sz must be an
/// integer multiple of reg.
template<class T1, class T2> inline
float weighted_l2_norm(T1* a, T1* b, int sz, T2* w=NULL, int reg=-1)
{
if( w == NULL )
error("weight array is NULL. use more efficient l2norm instead");
int wsz;
if( reg == -1 ) wsz = sz;
else wsz = reg;
int rsz = sz / wsz; // segment length; 1 when reg == -1
if( rsz*wsz != sz )
error("array size sz must be an integer multiple of reg");
int k;
float norm=0;
float sub_norm=0;
for( k=0; k<wsz; k++ )
{
sub_norm = l2norm( a+k*rsz, b+k*rsz, rsz );
norm += w[k] * sub_norm;
}
return norm;
}
template<class T1, class T2> inline
float mean_absolute_difference( T1* arr1, T2* arr2, int size)
{
float mad_score=0;
for( int i=0; i<size; i++ )
{
mad_score += fabs( (float)arr1[i] - (float)arr2[i] );
}
return mad_score/size;
}
/// adds a constant number to every number in the array;
template<class T1, class T2> inline
T1* add(T1* arr, int sz, T2 num, bool in_place=false)
{
T1* out;
if( in_place ) out = arr;
else out = allocate<T1>(sz);
for( int i=0; i<sz; i++ )
{
out[i] = arr[i] + (T1)num;
}
return out;
}
/// adds a constant number to every number in the matrix;
template<class T1, class T2> inline
T1** add(T1** arr, int ysz, int xsz, T2 num, bool in_place=false)
{
T1** out;
if( in_place ) out = arr;
else out = allocate<T1>(ysz,xsz);
for( int y=0; y<ysz; y++ )
for( int x=0; x<xsz; x++ )
{
out[y][x] = arr[y][x] + (T1)num;
}
return out;
}
/// subtracts a constant number from every element in the array;
template<class T1, class T2> inline
T1* subt(T1* arr, int sz, T2 num, bool in_place=false)
{
T1* out = add(arr,sz,-num,in_place);
return out;
}
/// subtracts a constant number from every element in the matrix;
template<class T1, class T2> inline
T1** subt(T1** arr, int ysz, int xsz, T2 num, bool in_place=false)
{
T1** out = add(arr,ysz,xsz,-num,in_place);
return out;
}
/// divides the elements of the array with num
template<class T1, class T2> inline
void divide(T1* arr, int sz, T2 num )
{
float inv_num = 1.0 / num;
for( int i=0; i<sz; i++ )
{
arr[i] = (T1)(arr[i]*inv_num);
}
}
/// thresholds the data.
template<class T> inline
T* threshold(T* data, int sz, T threshold)
{
if(sz == 0) return NULL;
T* result = allocate<T>(sz);
for(int i=0; i<sz; i++)
{
if( data[i] > threshold ) result[i] = 1;
else result[i] = 0;
}
return result;
}
/// returns the sign of a point.
template<class T> inline
int sign(T num)
{
if( num < 0.0 ) return -1;
if( num > 0.0 ) return 1;
return 0; // fix: guarantee a return value on every path
}
/// returns the sign array of an array.
template<class T> inline
int* sign(T* arr, int sz)
{
int* out = allocate<int>(sz);
for( int k=0; k<sz; k++ )
{
out[k] = sign( arr[k] );
}
return out;
}
template<class T> inline
int compare( const void* a, const void* b )
{
// compare via <,> so small floating-point differences are not truncated to 0
T va = *(T*)a;
T vb = *(T*)b;
if( va < vb ) return -1;
if( va > vb ) return 1;
return 0;
}
/// sorts the data array "data".
template<class T> inline
T* quick_sort( T* data, int dsz, bool in_place=true)
{
T* out=NULL;
if( in_place ) out = data;
else out = clone(data, dsz);
std::qsort( out, dsz, sizeof(T), compare<T> );
return out;
}
template<class T> inline
T median(T* data, int dsz)
{
T* tmp = quick_sort(data, dsz, false);
T med=0;
if( dsz%2 == 1 ) med = tmp[ dsz/2 ];
else med = (tmp[ dsz/2 ] + tmp[ dsz/2 - 1 ] ) /2;
deallocate(tmp);
return med;
}
/// computes the median of the array: destroys the contents of the data array.
template<typename T> inline
void median( T* data, int sz, T &medval )
{
std::qsort(data, sz, sizeof(T), compare<T> );
if( sz%2 == 1 ) medval = data[sz/2];
else medval = (data[sz/2]+data[sz/2-1])/2;
}
template<typename T> inline
void smooth_median( T* data, int h, int w, int msz, T* out )
{
int wsz=(2*msz+1)*(2*msz+1);
const static int max_buffer_size = 441;
assert( wsz < max_buffer_size );
T buffer[max_buffer_size];
for( int y=0; y<h; y++ )
{
for( int x=0; x<w; x++ )
{
int cnt = 0;
for( int r=-msz; r<=msz; r++ )
{
int yy = y+r;
if( yy >= h ) yy = h-1;
if( yy < 0 ) yy = 0;
for( int c=-msz; c<=msz; c++ )
{
int xx=x+c;
if( xx >= w ) xx = w-1;
if( xx < 0 ) xx = 0;
buffer[cnt++] = data[yy*w+xx];
}
}
median( buffer, wsz, out[y*w+x] );
}
}
}
/// multiplies two arrays element by element.
/// the result is in the first array's type
template<class T1, class T2> inline
T1* times( T1* arr1, T2* arr2, int w)
{
T1* out = allocate<T1>(w);
for( int i=0; i<w; i++ )
out[i] = (T1)(arr1[i]*arr2[i]);
return out;
}
/// multiplies two matrices element by element.
/// the result is in the first matrix's type
template<class T1, class T2> inline
T1** times( T1** mat1, T2** mat2, int h, int w)
{
T1** out = allocate<T1*>(h);
for( int i=0; i<h; i++ )
out[i] = times( mat1[i], mat2[i], w );
return out;
}
/// convert a ** data to a * data in row-first order.
/// it uses memcpy, therefore, works for built-in types.
template<class T> inline
T* arrayize(T** data, int xsz, int ysz)
{
T* out = allocate<T>(xsz*ysz);
for( int i=0; i<ysz; i++ )
memcpy(out+i*xsz,data[i],sizeof(T)*xsz); // fix: destination is the row offset, not an element
return out;
}
/// inplace shifting: accepts negative shifts
template<class T>
T* shift_array(T* arr, int size, int shift)
{
// if shift = 0 -> you can return now
if( shift == 0 ) return arr;
T* temp_array = allocate<T>(size);
// if negative -> compensate
if( shift < 0 ) shift += size;
// copy the first portion
memcpy(temp_array, arr+shift, sizeof(T)*(size-shift) );
// copy the rest
memcpy(temp_array+size-shift, arr, sizeof(T)*shift );
memcpy(arr,temp_array,sizeof(T)*size); // fix: copy bytes, not element counts
deallocate(temp_array);
return arr;
}
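// worked example (illustrative): for arr = {1, 2, 3, 4, 5},
// shift_array(arr, 5, 2) yields {3, 4, 5, 1, 2}; a negative shift of -2 is
// first compensated to 3, so it yields {4, 5, 1, 2, 3}.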
/// shifts the contents of the array in segmented regions
/// i.e: shifts the contents by "shift" in a segment
/// size = n*segment, n = integer
template<class T>
T* segmented_shift_array(T* &arr, int size, int segment, int shift)
{
int segment_step = size / segment;
if( shift == 0 ) return arr;
for( int s=0; s<size; s += segment_step )
{
shift_array(arr+s, segment_step, shift);
}
return arr;
}
/// counts the number of times the value val occurs in data[]
template<class T> inline
int count( T* data, int sz, T val)
{
int counter = 0;
for(int i=0; i<sz; i++)
{
if( data[i] == val )
counter++;
}
return counter;
}
template<class T1, class T2> inline
void set(T1* data, int sz, T2 val)
{
for( int k=0; k<sz; k++ )
data[k]=(T1)val;
}
template<class T1, class T2> inline
void set(T1** data, int rsz, int csz, T2 val)
{
for( int r=0; r<rsz; r++ )
for( int c=0; c<csz; c++ )
data[r][c]=(T1)val;
}
/// rotates x1 y1 by theta (in radians)
template<class T1, class T2> inline
void rotate( T1 y1, T1 x1, T2& y2, T2& x2, float theta, T1 ty, T1 tx )
{
float kos = cos( theta );
float zin = sin( theta );
x2 = (T2)( x1*kos - y1*zin );
y2 = (T2)( x1*zin + y1*kos );
return;
}
/// rotates the image with respect to ry, rx.
template<class T> inline
T* rotate( T* imge, int h, int w, float theta, float ry=0, float rx=0 )
{
float kos = cos(theta);
float zin = sin(theta);
int x, y;
T* rimge = allocate<T>(h*w);
initialize(rimge, h*w, 0);
float ty, tx;
float ny, nx;
for( y=0; y<h; y++ )
{
for( x=0; x<w; x++ )
{
tx = x - rx;
ty = y - ry;
nx = ( tx * kos - ty * zin + rx );
ny = ( tx * zin + ty * kos + ry );
if( is_inside( nx, 0, w-1, ny, 0, h-1 ) )
rimge[y*w+x] = (T)bilinear_interpolation(imge, w, nx, ny);
}
}
return rimge;
}
/// stretches the image to minI=0 -- maxI=255 range
template<class T> inline
T* stretch(T* image, int sz, T val, bool in_place=false)
{
// find the min intensity in roi
T min_inten=INT_MAX;
T max_inten=INT_MIN;
for( int k=0; k<sz; k++ )
{
if( image[k] <= val ) continue;
if( image[k] < min_inten ) min_inten = image[k];
if( image[k] > max_inten ) max_inten = image[k];
}
float s = 255.0f/(float)(max_inten-min_inten);
T* output = NULL;
if( in_place ) output = image;
else output = zeros<T>(sz);
for( int k=0; k<sz; k++ )
{
if( image[k] > val )
output[k] = (T)((image[k]-min_inten) * s);
else
output[k] = image[k];
}
return output;
}
/// returns the number of digits of a number.
inline int digit_number(int num)
{
if( num == 0 ) return 1;
int counter = 0;
while( num != 0 )
{
num /= 10;
counter++;
}
return counter;
}
/// returns the value of a sigmoid spanning miny-maxy with 'rate'
/// and x-symmetry axis sym_axis.
/// miny-maxy : the minimum and maximum interval for the y axis.
/// rate : the rate at which the sigmoid reaches maxy from miny.
/// sym_axis : symmetry axis in the x axis. sig(sx-d)+sig(sx+d) = maxy:
/// sum of the y values from the symmetry point makes maxy.
inline float sigmoid(float x, float miny, float maxy, float rate, float sym_axis)
{
float xp = exp(rate*(x-sym_axis));
return (maxy - miny) * xp / ( xp + 1 ) + miny;
}
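// worked example (illustrative): at x = sym_axis, xp = 1 and the function
// returns (maxy - miny)/2 + miny, the midpoint of the y range; for rate > 0
// it approaches maxy as x -> +inf and miny as x -> -inf.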
/// returns the "and" of two boolean arrays.
inline bool* and_array( bool* a, bool* b, int sz)
{
bool* c = allocate<bool>(sz);
for( int i=0; i<sz; i++ )
c[i] = a[i] & b[i];
return c;
}
/// returns the "or" of two boolean arrays.
inline bool* or_array( bool* a, bool* b, int sz)
{
bool* c = allocate<bool>(sz);
for( int i=0; i<sz; i++ )
c[i] = a[i] | b[i];
return c;
}
/// finds the n local-modes: locals -> return indices, workspace[sz]
template<typename T> inline
void find_n_local_min(const T* arr, const int sz, int* locals, const int n, T* workspace )
{
int min_count=0;
for( int i=0; i<sz; i++ ) workspace[i] = -1;
for( int i=0; i<n; i++ ) locals[i] = -1;
T prev=INT_MAX;
T next=INT_MAX;
for( int i=0; i<sz; i++ )
{
if( i > 0 ) prev = arr[i-1]; else prev = INT_MAX;
if( i < sz-1 ) next = arr[i+1]; else next = INT_MAX;
if( (arr[i] < prev) && (arr[i] < next) )
{
workspace[min_count] = i;
min_count++;
}
}
// cout<<"mins\n";
// for( int i=0; i<min_count; i++ )
// {
// cout<<workspace[i]<<" ";
// if( workspace[i] != -1 ) cout<<arr[(int)workspace[i]]<<endl;
// else cout<<-1<<endl;
// }
// cout<<endl;
bool inserted=false;
int fn=1;
locals[0] = workspace[0];
for( int j=1; j<min_count; j++ )
{
inserted=false;
if( workspace[j] == -1 ) break;
for( int k=0; k<fn; k++ )
{
if( arr[ (int)workspace[j] ] <= arr[ locals[k] ] )
{
shift_array_right( locals, n, k );
locals[k] = workspace[j];
if( fn < n ) fn++;
inserted=true;
break;
}
}
if( !inserted && (fn < n) )
{
locals[fn] = workspace[j];
fn++;
}
}
for( int i=fn; i<n; i++ ) locals[i]=-1;
// cout<<"locals\n";
// for( int i=0; i<n; i++ )
// {
// cout<<locals[i]<<" ";
// if( locals[i] != -1 ) cout<<arr[locals[i]]<<endl;
// else cout<<-1<<endl;
// }
// cout<<endl;
}
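/// usage sketch (hypothetical data): with arr = {5,1,4,2,6,0,3} the interior
/// minima sit at indices 1, 3 and 5 (values 1, 2 and 0). After
///   int locals[2]; float ws[7];
///   find_n_local_min(arr, 7, locals, 2, ws);
/// locals should hold the indices of the two smallest minima ordered by
/// increasing value (here 5 then 1); unused slots are set to -1.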
}
#endif
|
dot_product_1.c | #include "testing_utils.h"
// blocked (cache-tiled) matrix-matrix multiply; assumes n is a multiple of BLOCK_SIZE
void dotProductBlockOptimized(float ** __restrict__ c, float ** __restrict__ a, float ** __restrict__ b, int n){
omp_set_num_threads(24);
for(int j_block = 0; j_block < n; j_block += BLOCK_SIZE)
for(int k_block = 0; k_block < n; k_block += BLOCK_SIZE)
#pragma omp parallel for
for(int i = 0; i < n; i ++)
for(int k = k_block; k < k_block+BLOCK_SIZE; k ++){
for(int j = j_block; j < j_block+BLOCK_SIZE; j ++)
c[i][j] += a[i][k] * b[k][j];
}
}
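// For reference, a naive (unblocked) multiply with the same i-k-j loop order;
// the blocked version above revisits BLOCK_SIZE-wide tiles of b while they are
// still cache-resident, which is where its speedup comes from. This sketch is
// not part of the original benchmark and the name is hypothetical.
void dotProductNaive(float ** __restrict__ c, float ** __restrict__ a, float ** __restrict__ b, int n){
	#pragma omp parallel for
	for(int i = 0; i < n; i ++)
		for(int k = 0; k < n; k ++)
			for(int j = 0; j < n; j ++)
				c[i][j] += a[i][k] * b[k][j];
}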
//main function
int main(){
unsigned seed=0;
	//note: aligned(32) on these pointer variables does not align the malloc'd rows themselves
	float __attribute__((aligned(32))) **a = (float**)malloc(sizeof(float*)*N);
float __attribute__((aligned(32))) **b = (float**)malloc(sizeof(float*)*N);
float __attribute__((aligned(32))) **c = (float**)malloc(sizeof(float*)*N);
srand(seed);
	//build matrix A with random values and C initialized with 0's
for(int i = 0; i < N; i++){
c[i] = (float*) malloc(sizeof(float)*N);
a[i] = (float*) malloc(sizeof(float)*N);
for(int j = 0; j < N; j++){
a[i][j] = rand();
c[i][j] = 0;
}
}
	//build matrix B with all elements equal to 1
for(int i = 0; i < N; i++){
b[i] = (float*) malloc(sizeof(float)*N);
for(int j = 0; j < N; j++)
b[i][j] = 1;
}
start();
dotProductBlockOptimized(c,a,b,N);
printf("%llu usecs \n", stop());
return 0;
}
|
GB_unop__identity_fp64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_fp32)
// op(A') function: GB (_unop_tran__identity_fp64_fp32)
// C type: double
// A type: float
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp64_fp32)
(
double *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
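// Illustrative note (not generated code): this kernel is what ultimately
// services a typecasting apply such as
//     GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_FP64, A, NULL) ;
// when C is GrB_FP64 and A is GrB_FP32; dispatch to it happens elsewhere
// (see GB_apply).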
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp64_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bget_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bget_uint16
// A.*B function (eWiseMult): GB_AemultB__bget_uint16
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bget_uint16
// C+=b function (dense accum): GB_Cdense_accumb__bget_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_uint16
// C=scalar+B GB_bind1st__bget_uint16
// C=scalar+B' GB_bind1st_tran__bget_uint16
// C=A+scalar GB_bind2nd__bget_uint16
// C=A'+scalar GB_bind2nd_tran__bget_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITGET (x, y, uint16_t, 16) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bget_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bget_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bget_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__bget_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bget_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bget_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = Bx [p] ;
Cx [p] = GB_BITGET (x, bij, uint16_t, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bget_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = Ax [p] ;
Cx [p] = GB_BITGET (aij, y, uint16_t, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ; \
}
GrB_Info GB_bind1st_tran__bget_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ; \
}
GrB_Info GB_bind2nd_tran__bget_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
operator_tune-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
#define MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
#include <dmlc/base.h>
#include <dmlc/logging.h>
#include <mshadow/base.h>
#include <atomic>
#include <cstdint>
#include <chrono>
#include <thread>
#include <string>
#include <vector>
#include <algorithm>
#include <list>
#include <random>
#include <unordered_set>
#include <sstream>   // std::istringstream in StringUtil::string2list
#include <memory>    // std::unique_ptr in demangle()
#include <iostream>  // std::cout for tuning-data output
#include <cmath>     // std::fabs in Initialize()
#include <cctype>    // isdigit / isspace
#include <climits>   // INT_MAX
#include "./mxnet_op.h"
#include "./operator_tune.h"
#if (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && !defined(__mips__)
# define HAS_CXA_DEMANGLE 1
#else
# define HAS_CXA_DEMANGLE 0
#endif
#if HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace mxnet {
namespace op {
#ifndef MXNET_NO_INLINE
#ifdef _MSC_VER
#define MXNET_NO_INLINE __declspec(noinline)
#else
#define MXNET_NO_INLINE __attribute__((noinline))
#endif
#endif // MXNET_NO_INLINE
#define OUTSIDE_COUNT_SHIFT 9
namespace tune {
/*!
* \brief Convert TuningMode value to a string representation
* \param tm Scalar TuningMode value
* \return Character pointer to a string representing the TuningMode value
*/
inline const char *TuningModeToString(const TuningMode tm) {
switch (tm) {
case kAuto:
return "Auto";
case kNeverOMP:
return "NeverOMP";
case kAlwaysOMP:
return "AlwaysOMP";
default:
CHECK(false) << "Unknown TuningMode type: " << static_cast<int>(tm);
return "<unknown>";
}
}
} // namespace tune
/*!
* \brief Engine to tune kernel operations
* \tparam DType Data type to be used when tuning the kernel operations
* \remarks The basic concept here is that we time how long a trivial loop takes with and without
* OMP, subtracting the non-OMP run from the OMP run, which gives us the time
* that the OMP overhead takes. Times were found to be relatively invariant with
 * regard to the number of threads/cores on a given machine.
* Secondly, supplied operators are run and timed (for each data type) in order to determine
* their individual time cost.
*
* Knowing the following items, we can determine how long the OMP and non-OMP run
* is expected to take:
* 1) OMP overhead time
* 2) Number of iterations required
* 3) Number of threads to be used if we choose the OMP method
* 4) The data type
*
* Therefore, at Kernel::Launch() time, we can estimate whether it is faster to use OMP or not
* for the given kernel operator.
*
* Results and efficiency of the tuning is tested in the gtest OMP_TUNING test suite
*/
template<typename DType>
class OperatorTune : public OperatorTuneByType<DType> {
public:
using Tick = OperatorTuneBase::Tick;
using duration_t = OperatorTuneBase::duration_t;
using OperatorTuneByType<DType>::tuning_mode_;
/*!
* \brief Constructor
*/
OperatorTune() {
TuneAll();
}
/*!
* \brief Initialize the OperatorTune object
* \return Whether the OperatorTune object was successfully initialized
*/
static bool Initialize() {
if (!initialized_) {
initialized_ = true;
// Generate some random data for calling the operator kernels
data_set_.reserve(0x100);
std::random_device rd;
std::mt19937 gen(rd());
if (!std::is_integral<DType>::value) {
std::uniform_real_distribution<> dis(-1, 1);
for (int n = 0; n < 0x100; ++n) {
const auto val = static_cast<DType>(dis(gen));
// If too close to zero, try again
if (std::fabs(static_cast<double>(val)) < 1e-5) {
--n;
continue;
}
data_set_.emplace_back(val);
}
} else {
std::uniform_int_distribution<> dis(-128, 127);
for (int n = 0; n < 0x100; ++n) {
const auto val = static_cast<DType>(dis(gen));
// If zero, try again
if (!val) {
--n;
continue;
}
data_set_.emplace_back(val);
}
}
// Use this environment variable to generate new tuning statistics
// In order to avoid printing too many copies, only the float32 object prints
output_tuning_data_ = mshadow::DataType<DType>::kFlag == mshadow::kFloat32
&& dmlc::GetEnv("MXNET_OUTPUT_TUNING_DATA", false);
// If outputting tuning data, then also output verbose logging info
OperatorTuneBase::verbose_tuning_info_ = dmlc::GetEnv("MXNET_VERBOSE_TUNING_INFO", false);
OperatorTuneBase::tuning_weight_scale_ = dmlc::GetEnv("MXNET_TUNING_WEIGHT_SCALE", 0.0);
// This isn't actually supposed to be multithreaded init, but just to be sure the change is
// seen everywhere, using atomic bool.
if (!OperatorTuneBase::calculated_.load()) {
      // Not especially concerned with a race condition, since this should
// run when only one thread is active (static init), just don't cache this variable
OperatorTuneBase::calculated_.store(true);
std::string config = dmlc::GetEnv("MXNET_USE_OPERATOR_TUNING", std::string());
StringUtil::trim(&config);
// disabled
if (!config.empty() && ::isdigit(config[0]) && std::atoi(config.c_str()) == 0) {
OperatorTuneBase::omp_overhead_ns_ = INT_MAX;
} else {
OperatorTuneBase::omp_overhead_ns_ = GetOMPLoopOverhead();
}
ParseEnablerConfig(config);
}
if (OperatorTuneBase::verbose_tuning_info_) {
LOG(INFO) << "OMP overhead: " << OperatorTuneBase::omp_overhead_ns_ << " nanoseconds";
}
}
return true;
}
/*!
* \brief Schedule a tuning run
* \tparam OP Operator to tune
* \param tune_func Function to call which tunes the operator
* \return true if the tune operation was scheduled
*/
template<typename OP>
static bool ScheduleTune(void (*tune_func)()) {
#ifdef MXNET_USE_OPERATOR_TUNING
if (tune_func) {
GetTuningList()->push_back(tune_func);
operator_names_.insert(demangle(typeid(OP).name()));
return true;
}
return false;
#else
return true;
#endif
}
/*!
* \brief Is the template parameter type a tuned kernel?
* \tparam OP kernel operator type
* \return true if the operator/kernel is tuned
*/
template<typename OP>
static bool IsTuned() {
return operator_names_.find(demangle(typeid(OP).name())) != operator_names_.end();
}
  /*!
* \brief Tune all registered kernel operators that haven't already been tuned
*/
static bool TuneAll() {
Initialize();
std::list<void (*)()> *tl = GetTuningList();
const size_t size_save = tl->size(); // For checking if anything asynchronous is
// adding or removing items, which is forbidden
if (output_tuning_data_ && !tl->empty()) {
// Only emit this once, use the most common case, 'float32'
if (mshadow::DataType<DType>::kFlag == mshadow::kFloat32) {
std::cout << "OperatorTuneBase::duration_t "
<< "OperatorTuneBase::omp_overhead_ns_ = " << OperatorTuneBase::omp_overhead_ns_
<< ";" << std::endl << std::flush;
}
}
const Tick start = std::chrono::high_resolution_clock::now();
for (auto i : *tl) {
(*i)();
}
if (OperatorTuneBase::verbose_tuning_info_) {
const duration_t duration = OperatorTune::GetDurationInNanoseconds(start);
LOG(INFO) << "Op Tuning for " << type_name<DType>()
<< " took " << (duration / 1000000) << " ms";
}
CHECK_EQ(size_save, tl->size()) << "Tuning list size should not have changed while tuning";
tl->clear();
return true;
}
/*!
* \brief Return set of operator names that were registered to be tuned. Does not imply
* that the operator has been tuned.
* \return Set of operator/kernel names that were registered for tuning
*/
static const std::unordered_set<std::string>& TunedOperatorNames() {
return operator_names_;
}
protected:
/*!
* \brief Get the list of tuning function calls for the operators
* \return Pointer to list of tuning function calls
*/
static std::list<void (*)()> *GetTuningList();
/*!
* \brief Demangle typeid::name() in order to generate source macros
* \param name C++ Mangled name
* \return Demangled name as string
*/
static inline std::string demangle(const char *name) {
#if HAS_CXA_DEMANGLE
int status = -4; // some arbitrary value to eliminate the compiler warning
std::unique_ptr<char, void (*)(void *)> res{
abi::__cxa_demangle(name, nullptr, nullptr, &status),
&std::free
};
return status ? name : res.get();
#else
return name;
#endif
}
/*!
* \brief Type name as string
* \tparam T Type
* \return std::string representing the human-readable demangled type name
*/
template<typename T> static inline std::string type_name() {
return demangle(typeid(T).name());
}
/*! \brief Measure OMP overhead for a trivial OMP loop using all cores
* \param omp_thread_count - Number of OMP threads to use in the timing test
* \returns Duration in nanoseconds for the OMP overhead (time to initiate and close the
* OMP session)
*/
static duration_t GetOMPLoopOverhead(const size_t omp_thread_count) {
CHECK_GT(omp_thread_count, 1); // Don't try to use OMP for one thread
int wl_count = OperatorTuneBase::WORKLOAD_COUNT;
Tick start = std::chrono::high_resolution_clock::now();
// Use two loops in order to simulate OMP outside timing
for (size_t i = 0; i < OUTSIDE_COUNT; ++i) {
for (int x = 0; x < wl_count; ++x) {
// trivial operation
volatile_int_ += x;
}
}
const OperatorTuneBase::duration_t no_omp_duration =
OperatorTuneBase::GetDurationInNanoseconds(start);
// Scale OMP iterations by type calculation complexity
double factor;
// if tuning_weight_scale_ is a number that looks valid, use it as the factor
if (OperatorTuneBase::tuning_weight_scale_ > 0.01) {
factor = OperatorTuneBase::tuning_weight_scale_;
} else {
// These are empirically-determined constants found by balancing between
// a desktop (8 & 12 cpu's) and large cloud instances (32 & 64 cpu's)
switch (mshadow::DataType<DType>::kFlag) {
case mshadow::kUint8:
case mshadow::kInt8:
factor = 8.5;
break;
case mshadow::kInt32:
factor = 4.5;
break;
case mshadow::kInt64:
factor = 2;
break;
case mshadow::kFloat64:
factor = 1.25;
break;
case mshadow::kFloat32:
default:
factor = 1.0;
break;
}
}
wl_count = static_cast<int>(factor * OperatorTuneBase::WORKLOAD_COUNT * omp_thread_count);
start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < OUTSIDE_COUNT; ++i) {
#pragma omp parallel for num_threads(omp_thread_count)
for (int x = 0; x < wl_count; ++x) {
// trivial operation
volatile_int_ += x;
}
}
const duration_t omp_duration = OperatorTuneBase::GetDurationInNanoseconds(start)
- no_omp_duration;
return omp_duration >> OUTSIDE_COUNT_SHIFT;
}
/*! \brief Measure OMP overhead for a trivial OMP loop using all cores
   * \returns Time in nanoseconds to initialize/cleanup when executing an OMP block
*/
static duration_t GetOMPLoopOverhead() {
// It was found empirically that OMP times was not heavily tied to number of cores,
// so take an average across all core counts
const auto max_cores = static_cast<size_t>(omp_get_num_procs()) >> 1;
if (max_cores >= 2) {
std::vector<duration_t> core_times;
// Take care of any OMP lazy-init with a throwaway call
for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) {
GetOMPLoopOverhead(omp_threads);
}
std::vector<duration_t> durations;
durations.reserve(max_cores - 1);
for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) {
const duration_t duration = GetOMPLoopOverhead(omp_threads);
if (OperatorTuneBase::verbose_tuning_info_) {
LOG(INFO) << "OMP Thread Count: " << omp_threads << ", overhead: " << duration << " ns";
}
durations.emplace_back(duration);
}
// return median
std::sort(durations.begin(), durations.end());
return durations[durations.size() >> 1];
}
return INT_MAX; // If only one core, then never use OMP (say the overhead is huge)
}
/*!
* \brief Some string utility functions that aren't specific to tuning
*/
struct StringUtil {
/*!
     * \brief Trim whitespace from the beginning and end of a string
     * \param s String to trim
* \return reference to the modified string. This is the same std::string object as what was
* supplied in the parameters
*/
static std::string &trim(std::string *s) {
s->erase(s->begin(), std::find_if(s->begin(), s->end(), [](int ch) {
return !std::isspace(ch);
}));
s->erase(std::find_if(s->rbegin(), s->rend(), [](int ch) {
return !std::isspace(ch);
}).base(), s->end());
return *s;
}
/*!
* \brief Tokenize a string into a list of tokens
* \param s String to tokenize
* \return std::list of tokens
*/
static std::list<std::string> string2list(const std::string &s) {
std::list<std::string> res;
std::istringstream iss(s);
std::string token;
while (std::getline(iss, token, ',')) {
trim(&token);
if (!token.empty()) {
res.push_back(token);
}
}
return res;
}
};
/*!
* \brief Get data type from string representation
* \warning Do not call from a performance-sensitive area
*/
static int type_from_string(const std::string& type_string) {
if (type_string == "float32")
return mshadow::kFloat32;
if (type_string == "float64")
return mshadow::kFloat64;
if (type_string == "float16")
return mshadow::kFloat16;
if (type_string == "int8")
return mshadow::kInt8;
if (type_string == "uint8")
return mshadow::kUint8;
if (type_string == "int32")
return mshadow::kInt32;
if (type_string == "int64")
return mshadow::kInt64;
return -1; // invalid
}
/*!
* \brief Parse MXNET_USE_OPERATOR_TUNING environment variable
* \param config String representation of MXNET_ENABLE_OPERATOR_TUNING environment variable
* Values:
* 0=disable all
* 1=enable all
* float32, float16, float32=list of types to enable, and disable those not listed
*/
static void ParseEnablerConfig(std::string config) {
StringUtil::trim(&config);
if (!config.empty()) {
// First disable all
OperatorTuneByType<float>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<double>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAlwaysOMP);
// See if it's a non-number (ie type or list of types)
if (!::isdigit(config[0])) {
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
std::list<std::string> tokens = StringUtil::string2list(config);
for (const std::string& stype : tokens) {
        // We don't have an enum for half_t
const int typ = type_from_string(stype);
if (typ >= 0) {
switch (typ) {
case mshadow::kFloat32:
OperatorTuneByType<float>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kFloat64:
OperatorTuneByType<double>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kFloat16:
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt8:
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kUint8:
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt32:
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt64:
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto);
break;
default:
CHECK(false) << "Unsupported tuning data type: " << stype;
break;
}
} else {
// -1 is error
LOG(WARNING) << "Unknown data type to be tuned: " << stype;
}
}
} else {
if (std::atoi(config.c_str()) > 0) {
OperatorTuneByType<float>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<double>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
}
}
}
}
/*! \brief Whether this object has been initialized */
static bool initialized_;
/*! \brief Number of passes to obtain an average */
static constexpr duration_t OUTSIDE_COUNT = (1 << OUTSIDE_COUNT_SHIFT);
/*! \brief Random data for timing operator calls */
static std::vector<DType> data_set_;
/*! \brief Operators tuned */
static std::unordered_set<std::string> operator_names_;
  /*! \brief Arbitrary object to modify in OMP loop */
static volatile int volatile_int_;
/*! \brief Output insertable (into code) instantiation+default-value macros */
static bool output_tuning_data_;
};
/*!
* \brief Class that tunes unary operators
* \tparam DType Data type to be used when tuning the kernel operations
*/
template<typename DType>
class UnaryOpTune : public OperatorTune<DType> {
protected:
typedef OperatorTune<DType> Super;
using duration_t = typename Super::duration_t;
using Tick = typename Super::Tick;
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take no arguments (ie set_zero)
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetBlankWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
      // Accumulate through a volatile pointer so the compiler cannot optimize the loop away
*res += OP::Map();
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take one argument (ie sqrt())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetUnaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take two arguments (ie elemwise_add())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static inline duration_t GetBinaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF], Super::data_set_[(i + 1) & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take three arguments (ie backwards_grad<elemwise_add>())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetTertiaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF],
Super::data_set_[(i + 1) & 0xFF],
Super::data_set_[i & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for mxnet-like kernels that take no arguments)
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetBlankWorkloadEx() {
std::unique_ptr<DType[]> tmp(new DType[Super::WORKLOAD_COUNT]);
DType *tmp_ptr = tmp.get();
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
OP::Map(i, tmp_ptr);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
public:
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes no arguments
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneBlankOperator() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkload<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes one argument
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneUnaryOperator() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetUnaryWorkload<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes a backward operator which takes one argument
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneUnaryBackwardOperator() {
mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] =
GetBinaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_BWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified "mxnet_op-type" kernel operator.
* Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes no arguments
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneBlankOperatorEx() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkloadEx<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BLANK_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Determine whether to use OMP based upon both timing and configuration using the
* given (templated) operator's workload
* \tparam OP Operator whose workload to use (tuned_op::workload_[0])
* \param N Number of iterations desired
* \param thread_count Number of OMP threads available to perform the iterations
* \returns Whether it's faster to use OMP for these iterations
*/
template<typename OP>
inline static bool UseOMP(size_t N, size_t thread_count) {
return OperatorTune<DType>::UseOMP(N,
thread_count,
static_cast<uint64_t>(N) * OP::workload_[0]);
}
};
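/*!
 * Illustrative sketch (not part of the tuner): UseOMP() above amounts to
 * comparing an estimated serial cost against an estimated parallel cost,
 * roughly serial = N * workload and parallel = N * workload / thread_count
 * + omp_overhead_ns_, choosing OMP when serial exceeds parallel. A minimal
 * standalone model of that decision, with all names hypothetical:
 *
 *   inline bool use_omp_model(uint64_t n, uint64_t workload_ns,
 *                             uint64_t threads, uint64_t omp_overhead_ns) {
 *     const uint64_t serial   = n * workload_ns;
 *     const uint64_t parallel = serial / threads + omp_overhead_ns;
 *     return serial > parallel;
 *   }
 */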
/*!
* \brief Class that tunes binary and unary operators
* \tparam DType Data type to be used when tuning the kernel operations
*/
template<typename DType>
class BinaryOpTune : public UnaryOpTune<DType> {
protected:
typedef UnaryOpTune<DType> Super;
public:
/*!
* \brief Tune a generic binary operator
* @tparam OP - Operator type
*/
template<typename OP>
static void TuneBinaryOperator() {
mxnet_op::tuned_op<OP, DType>::workload_[0] = Super::template GetBinaryWorkload<OP>();
if (Super::Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BINARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune binary backward operator
* \tparam OP - operator
*/
template<typename OP>
static void TuneBinaryBackwardOperator() {
mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] =
Super::template GetTertiaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>();
if (Super::Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BINARY_WORKLOAD_BWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
};
#undef OUTSIDE_COUNT_SHIFT
#undef WORKLOAD_COUNT_SHIFT
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
|
Determanager.h | /*****************************************************************************************[Cooperation.h]
Copyright (c) 2008-2011, Youssef Hamadi, Saïd Jabbour and Lakhdar Saïs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************************/
/* importClauseSwitchMode : (Cooperation* coop)
 Description :
 In the deterministic case, the two barriers guarantee that no other thread can return to search while the import is in progress.
 Otherwise, each thread that has found a solution simply exits.*/
//=================================================================================================
#include "core/msg.h"
using namespace Minisat;
//XXX called only once by solver immediately after export
lbool Solver::importClauses(Cooperation* coop) {
//Control the limit size clause export
coop->updateLimitExportClauses(this);
switch(deterministic_mode){
//XXX Non-deterministic mode seems easiest?
case 0: // non deterministic case
{
for(int t = 0; t < coop->nThreads(); t++)
if(coop->answer(t) != l_Undef)
return coop->answer(t);
//XXX A thread has found an answer. Broadcast via MPI. Maybe in calling function.
coop->importExtraClauses(this);
coop->importExtraUnits(this, extraUnits);
pull_clauses_from_remote(this, coop, this->threadId) ;
/* Lit** t = pull_clauses_from_remote(this->threadId) ; */
/* coop->importRemoteClauses(this, t) ; */
//extraUnits = pull_units_from_remote(this->threadId) ;
//coop->importExtraUnits(this, extraUnits);
break;
}
case 1: // deterministic case static frequency
{
if((int) conflicts % coop->initFreq == 0 || coop->answer(threadId) != l_Undef){
#pragma omp barrier
for(int t = 0; t < coop->nThreads(); t++)
if(coop->answer(t) != l_Undef) return coop->answer(t);
coop->importExtraClauses(this);
coop->importExtraUnits(this, extraUnits);
#pragma omp barrier
}
break;
}
case 2: // deterministic case dynamic frequency
{
      if(((int) conflicts % coop->deterministic_freq[threadId] == 0) || (coop->answer(threadId) != l_Undef)){
	coop->learntsz[threadId] = nLearnts();
#pragma omp barrier
	// each thread recomputes its own barrier frequency after synchronizing
	coop->deterministic_freq[threadId] = updateFrequency(coop);
for(int t = 0; t < coop->nThreads(); t++)
if(coop->answer(t) != l_Undef) return coop->answer(t);
coop->importExtraClauses(this);
coop->importExtraUnits(this, extraUnits);
#pragma omp barrier
}
break;
}
}
return l_Undef;
}
/*_________________________________________________________________________________________________
updateFrequency : (Cooperation* coop)
Description :
  When det=2, each thread tries to estimate the number of conflicts after which it must join the barrier.
  The estimate is based on the number of learnt clauses across all threads, assuming that the larger the
  learnt-clause database, the slower unit propagation becomes, which remains a reasonable approximation.
*/
int Solver::updateFrequency(Cooperation* coop){
double freq = 0;
int maxLearnts = 0;
for(int t = 0; t < coop->nThreads(); t++)
if((int)coop->learntsz[t] > maxLearnts)
maxLearnts = (int)coop->learntsz[t];
  if(maxLearnts == 0) return coop->initFreq; // no learnt clauses anywhere yet: keep the initial frequency
  freq = coop->initFreq + (double)coop->initFreq * (maxLearnts - learnts.size()) / maxLearnts;
return (int) freq;
}
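/* Worked example (hypothetical numbers): with initFreq = 10000,
   maxLearnts = 20000 and a local learnts.size() of 5000, the thread gets
   freq = 10000 + 10000 * (20000 - 5000) / 20000 = 17500 conflicts between
   barrier visits; threads with larger learnt databases synchronize sooner. */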
/*_________________________________________________________________________________________________
exportClause : (Cooperation* coop, vec<Lit>& learnt_clause)
Description :
  At level 0, propagated unit literals are exported to the other threads;
  otherwise the learnt clause itself is exported.
*/
void Solver::exportClause(Cooperation* coop, vec<Lit>& learnt_clause) {
if(coop->limitszClauses() < 1)
return;
if(decisionLevel() == 0){
for(int i = tailUnitLit; i < trail.size(); i++) {
coop->exportExtraUnit(this, trail[i]) ;
push_unit_remote(trail[i]) ;
}
tailUnitLit = trail.size();
}else {
coop->exportExtraClause(this, learnt_clause) ;
push_clause_remote(learnt_clause) ;
}
}
//=================================================================================================
// add Clauses received from others threads
CRef Solver::addExtraClause(vec<Lit>& lits){
CRef cr = ca.alloc(lits, true);
learnts.push(cr);
attachClause(cr);
claBumpActivity(ca[cr]);
return cr;
}
//=================================================================================================
// at level 0, unit extra clauses stored are propagated
void Solver::propagateExtraUnits(){
for(int i = 0; i < extraUnits.size(); i++)
if(value(extraUnits[i]) == l_Undef)
uncheckedEnqueue(extraUnits[i]);
}
|
hash_mult.h | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <immintrin.h>
#include <algorithm>
#ifdef KNL_EXE
#include <zmmintrin.h>
#else
#include <x86intrin.h>
#endif
#include "utility.h"
#include "CSR.h"
#include "BIN.h"
#define VECTORIZE
/* SpGEMM Specific Parameters */
#define HASH_SCAL 107 // multiplier coprime to the hash table size (which is always 2^n)
#ifdef KNL_EXE
#define MIN_HT_S 16 // minimum hash table size per row in symbolic phase
#define MIN_HT_N 16 // minimum hash table size per row in numeric phase
#define VEC_LENGTH 16
#define VEC_LENGTH_BIT 4
#define VEC_LENGTH_LONG 8
#define VEC_LENGTH_LONG_BIT 3
#else
#define MIN_HT_S 8 // minimum hash table size per row in symbolic phase
#define MIN_HT_N 8 // minimum hash table size per row in numeric phase
#define VEC_LENGTH 8
#define VEC_LENGTH_BIT 3
#define VEC_LENGTH_LONG 4
#define VEC_LENGTH_LONG_BIT 2
#endif
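/*
 * Note on the probing arithmetic used throughout this file: every table size
 * is a power of two, so (key * HASH_SCAL) & (ht_size - 1) equals
 * (key * HASH_SCAL) % ht_size. Worked example (illustrative): key = 42 and
 * ht_size = 16 give 42 * 107 = 4494, and 4494 & 15 = 14 = 4494 % 16.
 */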
/*
* Symbolic phase for Hash SpGEMM.
*/
template <class IT, class NT>
inline void hash_symbolic_kernel(const IT *arpt, const IT *acol, const IT *brpt, const IT *bcol, BIN<IT, NT> &bin)
{
#pragma omp parallel
{
IT tid = omp_get_thread_num();
IT start_row = bin.rows_offset[tid];
IT end_row = bin.rows_offset[tid + 1];
IT *check = bin.local_hash_table_id[tid];
for (IT i = start_row; i < end_row; ++i) {
IT nz = 0;
IT bid = bin.bin_id[i];
if (bid > 0) {
IT ht_size = MIN_HT_S << (bid - 1); // determine hash table size for i-th row
for (IT j = 0; j < ht_size; ++j) { // initialize hash table
check[j] = -1;
}
for (IT j = arpt[i]; j < arpt[i + 1]; ++j) {
IT t_acol = acol[j];
for (IT k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
IT key = bcol[k];
IT hash = (key * HASH_SCAL) & (ht_size - 1);
while (1) { // Loop for hash probing
if (check[hash] == key) { // if the key is already inserted, it's ok
break;
}
else if (check[hash] == -1) { // if the key has not been inserted yet, then it's added.
check[hash] = key;
nz++;
break;
}
else { // linear probing: check next entry
hash = (hash + 1) & (ht_size - 1); //hash = (hash + 1) % ht_size
}
}
}
}
}
bin.row_nz[i] = nz;
}
}
}
#ifdef KNL_EXE
/*
* Symbolic phase for Hash Vector SpGEMM
* This function is optimized for 32-bit integer with AVX-512.
*/
template <class NT>
inline void hash_symbolic_vec_kernel(const int *arpt, const int *acol, const int *brpt, const int *bcol, BIN<int, NT> &bin)
{
#ifdef VECTORIZE
const __m512i init_m = _mm512_set1_epi32(-1);
#endif
#pragma omp parallel
{
int tid = omp_get_thread_num();
int start_row = bin.rows_offset[tid];
int end_row = bin.rows_offset[tid + 1];
int *check = bin.local_hash_table_id[tid];
for (int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
__m512i key_m, check_m;
__mmask16 mask_m;
#endif
int nz = 0;
int bid = bin.bin_id[i];
if (bid > 0) {
int table_size = MIN_HT_S << (bid - 1); // the number of entries per table
            int ht_size = table_size >> VEC_LENGTH_BIT; // the number of chunks (1 chunk = VEC_LENGTH elements)
for (int j = 0; j < table_size; ++j) {
check[j] = -1; // initialize hash table
}
for (int j = arpt[i]; j < arpt[i + 1]; ++j) {
int t_acol = acol[j];
for (int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
int key = bcol[k];
int hash = ((key * HASH_SCAL) & (ht_size - 1)) << VEC_LENGTH_BIT;
#ifdef VECTORIZE
key_m = _mm512_set1_epi32(key);
#endif
while (1) { // Loop for hash probing
// check whether the key is in hash table.
#ifdef VECTORIZE
check_m = _mm512_load_epi32(check + hash);
mask_m = _mm512_cmp_epi32_mask(key_m, check_m, _MM_CMPINT_EQ);
if (mask_m != 0) {
break;
}
#else
bool flag = false;
#pragma vector
for (int l = 0; l < VEC_LENGTH; ++l) {
                        if (check[hash + l] == key) { // hash is already an element offset in this kernel
flag = true;
}
}
if (flag) {
break;
}
#endif
else {
// If the entry with same key cannot be found, check whether the chunk is filled or not
int cur_nz;
#ifdef VECTORIZE
                            mask_m = _mm512_cmp_epi32_mask(check_m, init_m, _MM_CMPINT_NE) ;
cur_nz = _popcnt32(mask_m);
#else
cur_nz = VEC_LENGTH;
#pragma vector
for (int l = VEC_LENGTH - 1; l >= 0; --l) {
                                if (check[hash + l] == -1) {
cur_nz = l;
}
}
#endif
if (cur_nz < VEC_LENGTH) { //if it is not filled, push the entry to the table
check[hash + cur_nz] = key;
nz++;
break;
}
                            else { // if it is filled, check next chunk (linear probing)
hash = (hash + VEC_LENGTH) & (table_size - 1);
}
}
}
}
}
}
bin.row_nz[i] = nz;
}
}
}
/*
* Symbolic phase for Hash Vector SpGEMM
* This function is optimized for 64-bit integer with AVX-512.
*/
template <class NT>
inline void hash_symbolic_vec_kernel(const long long int *arpt, const long long int *acol, const long long int *brpt, const long long int *bcol, BIN<long long int, NT> &bin)
{
#ifdef VECTORIZE
const __m512i init_m = _mm512_set1_epi64(-1);
#endif
#pragma omp parallel
{
long long int tid = omp_get_thread_num();
long long int start_row = bin.rows_offset[tid];
long long int end_row = bin.rows_offset[tid + 1];
long long int *check = bin.local_hash_table_id[tid];
for (long long int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
__m512i key_m, check_m;
__mmask8 mask_m;
#endif
long long int nz = 0;
long long int bid = bin.bin_id[i];
if (bid > 0) {
long long int table_size = MIN_HT_S << (bid - 1); // the number of entries per table
            long long int ht_size = table_size >> VEC_LENGTH_LONG_BIT; // the number of chunks (1 chunk = VEC_LENGTH_LONG elements)
for (long long int j = 0; j < table_size; ++j) {
check[j] = -1; // initialize hash table
}
for (long long int j = arpt[i]; j < arpt[i + 1]; ++j) {
long long int t_acol = acol[j];
for (long long int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
long long int key = bcol[k];
long long int hash = ((key * HASH_SCAL) & (ht_size - 1)) << VEC_LENGTH_LONG_BIT;
#ifdef VECTORIZE
key_m = _mm512_set1_epi64(key);
#endif
while (1) { // loop for hash probing
// check whether the key is in hash table.
#ifdef VECTORIZE
check_m = _mm512_load_epi64(check + hash);
mask_m = _mm512_cmp_epi64_mask(key_m, check_m, _MM_CMPINT_EQ);
if (mask_m != 0) {
break;
}
#else
bool flag = false;
#pragma vector
for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
                        if (check[hash + l] == key) { // hash is already an element offset in this kernel
flag = true;
}
}
if (flag) {
break;
}
#endif
else {
// If the entry with same key cannot be found, check whether the chunk is filled or not
long long int cur_nz;
#ifdef VECTORIZE
mask_m = _mm512_cmp_epi64_mask(check_m, init_m, _MM_CMPINT_NE);
cur_nz = _popcnt32(mask_m);
#else
                            cur_nz = VEC_LENGTH_LONG;
#pragma vector
for (int l = VEC_LENGTH_LONG - 1; l >= 0; --l) {
                                if (check[hash + l] == -1) {
cur_nz = l;
}
}
#endif
if (cur_nz < VEC_LENGTH_LONG) { //if it is not filled, push the entry to the table
check[hash + cur_nz] = key;
nz++;
break;
}
                            else { // if it is filled, check next chunk (linear probing)
hash = (hash + VEC_LENGTH_LONG) & (table_size - 1);
}
}
}
}
}
}
bin.row_nz[i] = nz;
}
}
}
#else
/*
* Symbolic phase for Hash Vector SpGEMM
* This function is optimized for 32-bit integer with AVX2.
*/
template <class NT>
inline void hash_symbolic_vec_kernel(const int *arpt, const int *acol, const int *brpt, const int *bcol, BIN<int, NT> &bin)
{
#ifdef VECTORIZE
const __m256i init_m = _mm256_set1_epi32(-1);
const __m256i true_m = _mm256_set1_epi32(0xffffffff);
#endif
#pragma omp parallel
{
int tid = omp_get_thread_num();
int start_row = bin.rows_offset[tid];
int end_row = bin.rows_offset[tid + 1];
int *check = bin.local_hash_table_id[tid];
for (int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
__m256i key_m, check_m;
__m256i mask_m;
int mask;
#endif
int nz = 0;
int bid = bin.bin_id[i];
if (bid > 0) {
int table_size = MIN_HT_S << (bid - 1); // the number of entries per table
            int ht_size = table_size >> VEC_LENGTH_BIT; // the number of chunks (1 chunk = VEC_LENGTH elements)
for (int j = 0; j < table_size; ++j) {
check[j] = -1; // initialize hash table
}
for (int j = arpt[i]; j < arpt[i + 1]; ++j) {
int t_acol = acol[j];
for (int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
int key = bcol[k];
int hash = (key * HASH_SCAL) & (ht_size - 1);
#ifdef VECTORIZE
key_m = _mm256_set1_epi32(key);
#endif
while (1) { // Loop for hash probing
// check whether the key is in hash table.
#ifdef VECTORIZE
check_m = _mm256_maskload_epi32(check + (hash << VEC_LENGTH_BIT), true_m);
mask_m = _mm256_cmpeq_epi32(key_m, check_m);
mask = _mm256_movemask_epi8(mask_m);
if (mask != 0) {
break;
}
#else
bool flag = false;
#pragma simd
for (int l = 0; l < VEC_LENGTH; ++l) {
if (check[(hash << VEC_LENGTH_BIT) + l] == key) {
flag = true;
}
}
if (flag) {
break;
}
#endif
else {
// If the entry with same key cannot be found, check whether the chunk is filled or not
int cur_nz;
#ifdef VECTORIZE
mask_m = _mm256_cmpeq_epi32(check_m, init_m);
mask = _mm256_movemask_epi8(mask_m);
cur_nz = (32 - _popcnt32(mask)) >> 2;
#else
cur_nz = VEC_LENGTH;
#pragma simd
for (int l = VEC_LENGTH - 1; l >= 0; --l) {
if (check[(hash << VEC_LENGTH_BIT) + l] == -1) {
cur_nz = l;
}
}
#endif
if (cur_nz < VEC_LENGTH) { //if it is not filled, push the entry to the table
check[(hash << VEC_LENGTH_BIT) + cur_nz] = key;
nz++;
break;
}
                            else { // if it is filled, check next chunk (linear probing)
hash = (hash + 1) & (ht_size - 1);
}
}
}
}
}
}
bin.row_nz[i] = nz;
}
}
}
template <class NT>
inline void hash_symbolic_vec_kernel(const long long int *arpt, const long long int *acol, const long long int *brpt, const long long int *bcol, BIN<long long int, NT> &bin)
{
#ifdef VECTORIZE
const __m256i init_m = _mm256_set1_epi64x(-1);
const __m256i true_m = _mm256_set1_epi64x(0xffffffffffffffff);
#endif
#pragma omp parallel
{
long long int tid = omp_get_thread_num();
long long int start_row = bin.rows_offset[tid];
long long int end_row = bin.rows_offset[tid + 1];
long long int *check = bin.local_hash_table_id[tid];
for (long long int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
__m256i key_m, check_m;
__m256i mask_m;
int mask;
#endif
long long int nz = 0;
long long int bid = bin.bin_id[i];
if (bid > 0) {
long long int table_size = MIN_HT_S << (bid - 1);
long long int ht_size = table_size >> VEC_LENGTH_LONG_BIT;
for (long long int j = 0; j < table_size; ++j) {
check[j] = -1;
}
for (long long int j = arpt[i]; j < arpt[i + 1]; ++j) {
long long int t_acol = acol[j];
for (long long int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
long long int key = bcol[k];
long long int hash = (key * HASH_SCAL) & (ht_size - 1);
#ifdef VECTORIZE
key_m = _mm256_set1_epi64x(key);
#endif
while (1) {
#ifdef VECTORIZE
check_m = _mm256_maskload_epi64(check + (hash << VEC_LENGTH_LONG_BIT), true_m);
mask_m = _mm256_cmpeq_epi64(key_m, check_m);
mask = _mm256_movemask_epi8(mask_m);
if (mask != 0) {
break;
}
#else
bool flag = false;
#pragma simd
for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
if (check[(hash << VEC_LENGTH_LONG_BIT) + l] == key) {
flag = true;
}
}
if (flag) {
break;
}
#endif
else {
long long int cur_nz;
#ifdef VECTORIZE
mask_m = _mm256_cmpeq_epi64(check_m, init_m);
mask = _mm256_movemask_epi8(mask_m);
cur_nz = (32 - _popcnt32(mask)) >> 3;
#else
cur_nz = VEC_LENGTH_LONG;
#pragma simd
for (int l = VEC_LENGTH_LONG - 1; l >= 0; --l) {
if (check[(hash << VEC_LENGTH_LONG_BIT) + l] == -1) {
cur_nz = l;
}
}
#endif
if (cur_nz < VEC_LENGTH_LONG) {
check[(hash << VEC_LENGTH_LONG_BIT) + cur_nz] = key;
nz++;
break;
}
else {
hash = (hash + 1) & (ht_size - 1);
}
}
}
}
}
}
bin.row_nz[i] = nz;
}
}
}
#endif
// Symbolic phase of Hash SpGEMM: dispatches to the scalar or vectorized kernel, then builds the row pointer of C
template <bool vectorProbing, class IT, class NT>
inline void hash_symbolic(const IT *arpt, const IT *acol, const IT *brpt, const IT *bcol, IT *crpt, BIN<IT, NT> &bin, const IT nrow, IT *nnz)
{
if (vectorProbing) {
hash_symbolic_vec_kernel(arpt, acol, brpt, bcol, bin);
}
else {
hash_symbolic_kernel(arpt, acol, brpt, bcol, bin);
}
/* Set row pointer of matrix C */
scan(bin.row_nz, crpt, nrow + 1);
*nnz = crpt[nrow];
}
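/*
* Note: `scan' above is assumed to be an exclusive prefix sum over bin.row_nz,
* i.e. crpt[0] = 0 and crpt[i + 1] = crpt[i] + bin.row_nz[i], so that crpt[nrow]
* holds the total number of nonzeros of C. A minimal sequential sketch under that
* assumption (the library's actual scan may well be parallel):
*/
#if 0
template <typename IT>
void scan_sketch(const IT *in, IT *out, IT n) // n = nrow + 1
{
    out[0] = 0;
    for (IT i = 0; i < n - 1; ++i) {
        out[i + 1] = out[i] + in[i]; // running total of per-row counts
    }
}
#endif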
/*
* Used for sort function.
* Elements are sorted in ascending order.
*/
template <typename IT, typename NT>
bool sort_less(const pair<IT, NT> &left, const pair<IT, NT> &right)
{
return left.first < right.first;
}
/*
* After the per-row computation on a hash table, sort the entries in ascending order if requested,
* then store them into the output matrix.
* This function is used by the hash_numeric* functions.
* The colids and values pointers already include the row offset, i.e. the caller
* passes colids + rpt[rowid] and values + rpt[rowid].
*/
template <bool sortOutput, typename IT, typename NT>
inline void sort_and_store_table2mat(IT *ht_check, NT *ht_value, IT *colids, NT * values, IT nz, IT ht_size)
{
IT index = 0;
// Sort elements in ascending order if necessary, and store them as output matrix
if (sortOutput) {
vector<pair<IT, NT>> p_vec(nz);
for (IT j = 0; j < ht_size; ++j) { // accumulate non-zero entry from hash table
if (ht_check[j] != -1) {
p_vec[index++] = make_pair(ht_check[j], ht_value[j]);
}
}
sort(p_vec.begin(), p_vec.end(), sort_less<IT, NT>); // sort only non-zero elements
for (IT j = 0; j < index; ++j) { // store the results
colids[j] = p_vec[j].first;
values[j] = p_vec[j].second;
}
}
else {
for (IT j = 0; j < ht_size; ++j) {
if (ht_check[j] != -1) {
colids[index] = ht_check[j];
values[index] = ht_value[j];
index++;
}
}
}
}
/*
* Numeric phase in Hash SpGEMM.
*/
template <bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric(const IT *arpt, const IT *acol, const NT *aval, const IT *brpt, const IT *bcol, const NT *bval, const IT *crpt, IT *ccol, NT *cval, const BIN<IT, NT> &bin, const MultiplyOperation multop, const AddOperation addop)
{
#pragma omp parallel
{
IT tid = omp_get_thread_num();
IT start_row = bin.rows_offset[tid];
IT end_row = bin.rows_offset[tid + 1];
IT *ht_check = bin.local_hash_table_id[tid];
NT *ht_value = bin.local_hash_table_val[tid];
for (IT i = start_row; i < end_row; ++i) {
IT bid = bin.bin_id[i];
if (bid > 0) {
IT offset = crpt[i];
IT ht_size = MIN_HT_N << (bid - 1);
for (IT j = 0; j < ht_size; ++j) {
ht_check[j] = -1;
}
for (IT j = arpt[i]; j < arpt[i + 1]; ++j) {
IT t_acol = acol[j];
NT t_aval = aval[j];
for (IT k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
NT t_val = multop(t_aval, bval[k]);
IT key = bcol[k];
IT hash = (key * HASH_SCAL) & (ht_size - 1);
while (1) { // Loop for hash probing
if (ht_check[hash] == key) { // key is already inserted
ht_value[hash] = addop(t_val, ht_value[hash]);
break;
}
else if (ht_check[hash] == -1) { // insert new entry
ht_check[hash] = key;
ht_value[hash] = t_val;
break;
}
else {
hash = (hash + 1) & (ht_size - 1); // (hash + 1) % ht_size
}
}
}
}
sort_and_store_table2mat<sortOutput, IT, NT>(ht_check, ht_value,
ccol + offset, cval + offset,
crpt[i + 1] - offset, ht_size);
}
}
}
}
#ifdef KNL_EXE
/*
* Numeric phase for Hash Vector SpGEMM
* This function is optimized for 32-bit integer with AVX-512.
*/
template <bool sortOutput, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric_vec(const int *arpt, const int *acol, const NT *aval, const int *brpt, const int *bcol, const NT *bval, const int *crpt, int *ccol, NT *cval, const BIN<int, NT> &bin, MultiplyOperation multop, AddOperation addop)
{
#ifdef VECTORIZE
const __m512i init_m = _mm512_set1_epi32(-1);
#endif
#pragma omp parallel
{
int tid = omp_get_thread_num();
int start_row = bin.rows_offset[tid];
int end_row = bin.rows_offset[tid + 1];
int *ht_check = bin.local_hash_table_id[tid];
NT *ht_value = bin.local_hash_table_val[tid];
for (int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
__m512i key_m, check_m;
__mmask16 mask_m;
#endif
int bid = bin.bin_id[i];
if (bid > 0) {
int offset = crpt[i];
int table_size = MIN_HT_N << (bid - 1); // the number of entries per table
int ht_size = table_size >> VEC_LENGTH_BIT; // the number of chunks (1 chunk = VEC_LENGTH elements)
for (int j = 0; j < table_size; ++j) {
ht_check[j] = -1; // initialize hash table
}
for (int j = arpt[i]; j < arpt[i + 1]; ++j) {
int t_acol = acol[j];
NT t_aval = aval[j];
for (int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
NT t_val = multop(t_aval, bval[k]);
int key = bcol[k];
int hash = ((key * HASH_SCAL) & (ht_size - 1)) << VEC_LENGTH_BIT;
#ifdef VECTORIZE
key_m = _mm512_set1_epi32(key);
#endif
while (1) { // loop for hash probing
// check whether the key is in hash table.
#ifdef VECTORIZE
check_m = _mm512_load_epi32(ht_check + hash);
mask_m = _mm512_cmp_epi32_mask(key_m, check_m, _MM_CMPINT_EQ);
if (mask_m != 0) {
int target = __builtin_ctz(mask_m);
ht_value[hash + target] += t_val;
break;
}
#else
int flag = -1;
#pragma vector
for (int l = 0; l < VEC_LENGTH; ++l) {
if (ht_check[hash + l] == key) {
flag = l;
}
}
if (flag >= 0) {
ht_value[hash + flag] += t_val;
break;
}
#endif
else {
// If no entry with the same key is found, check whether the chunk is full
int cur_nz;
#ifdef VECTORIZE
mask_m = _mm512_cmp_epi32_mask(check_m, init_m, _MM_CMPINT_NE);
cur_nz = _popcnt32(mask_m);
#else
cur_nz = VEC_LENGTH;
#pragma vector
for (int l = 0; l < VEC_LENGTH; ++l) {
if (ht_check[hash + l] == -1) {
cur_nz = l;
break;
}
}
#endif
if (cur_nz < VEC_LENGTH) { // if the chunk is not full, push the entry to the table
ht_check[hash + cur_nz] = key;
ht_value[hash + cur_nz] = t_val;
break;
}
else { // if it is full, check the next chunk (linear probing)
hash = (hash + VEC_LENGTH) & (table_size - 1);
}
}
}
}
}
sort_and_store_table2mat<sortOutput, int, NT>(ht_check, ht_value,
ccol + offset, cval + offset,
crpt[i + 1] - offset, ht_size);
}
}
}
}
/*
* Numeric phase for Hash Vector SpGEMM
* This function is optimized for 64-bit integer with AVX-512.
*/
template <bool sortOutput, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric_vec(const long long int *arpt, const long long int *acol, const NT *aval, const long long int *brpt, const long long int *bcol, const NT *bval, const long long int *crpt, long long int *ccol, NT *cval, const BIN<long long int, NT> &bin, MultiplyOperation multop, AddOperation addop)
{
#ifdef VECTORIZE
const __m512i init_m = _mm512_set1_epi64(-1);
#endif
#pragma omp parallel
{
long long int tid = omp_get_thread_num();
long long int start_row = bin.rows_offset[tid];
long long int end_row = bin.rows_offset[tid + 1];
long long int *ht_check = bin.local_hash_table_id[tid];
NT *ht_value = bin.local_hash_table_val[tid];
for (long long int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
__m512i key_m, check_m;
__mmask8 mask_m;
#endif
long long int bid = bin.bin_id[i];
if (bid > 0) {
long long int offset = crpt[i];
long long int table_size = MIN_HT_N << (bid - 1);
long long int ht_size = table_size >> VEC_LENGTH_LONG_BIT;
for (long long int j = 0; j < table_size; ++j) {
ht_check[j] = -1;
}
for (long long int j = arpt[i]; j < arpt[i + 1]; ++j) {
long long int t_acol = acol[j];
NT t_aval = aval[j];
for (long long int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
NT t_val = multop(t_aval, bval[k]);
long long int key = bcol[k];
long long int hash = ((key * HASH_SCAL) & (ht_size - 1)) << VEC_LENGTH_LONG_BIT;
#ifdef VECTORIZE
key_m = _mm512_set1_epi64(key);
#endif
while (1) { // loop for hash probing
#ifdef VECTORIZE
check_m = _mm512_load_epi64(ht_check + hash);
mask_m = _mm512_cmp_epi64_mask(key_m, check_m, _MM_CMPINT_EQ);
if (mask_m != 0) {
long long int target = __builtin_ctz(mask_m);
ht_value[hash + target] += t_val;
break;
}
#else
long long int flag = -1;
#pragma vector
for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
if (ht_check[hash + l] == key) {
flag = l;
}
}
if (flag >= 0) {
ht_value[hash + flag] += t_val;
break;
}
#endif
else {
long long int cur_nz;
#ifdef VECTORIZE
mask_m = _mm512_cmp_epi64_mask(check_m, init_m, _MM_CMPINT_NE);
cur_nz = _popcnt32(mask_m);
#else
cur_nz = VEC_LENGTH_LONG;
#pragma vector
for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
if (ht_check[hash + l] == -1) {
cur_nz = l;
break;
}
}
#endif
if (cur_nz < VEC_LENGTH_LONG) {
ht_check[hash + cur_nz] = key;
ht_value[hash + cur_nz] = t_val;
break;
}
else {
hash = (hash + VEC_LENGTH_LONG) & (table_size - 1);
}
}
}
}
}
sort_and_store_table2mat<sortOutput, long long int, NT>(ht_check, ht_value,
ccol + offset, cval + offset,
crpt[i + 1] - offset, ht_size);
}
}
}
}
#else
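/*
* Numeric phase for Hash Vector SpGEMM
* This function is optimized for 32-bit integer with AVX2.
*/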
template <bool sortOutput, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric_vec(const int *arpt, const int *acol, const NT *aval, const int *brpt, const int *bcol, const NT *bval, const int *crpt, int *ccol, NT *cval, const BIN<int, NT> &bin, MultiplyOperation multop, AddOperation addop)
{
#ifdef VECTORIZE
const __m256i init_m = _mm256_set1_epi32(-1);
const __m256i true_m = _mm256_set1_epi32(0xffffffff);
#endif
#pragma omp parallel
{
int tid = omp_get_thread_num();
int start_row = bin.rows_offset[tid];
int end_row = bin.rows_offset[tid + 1];
int *ht_check = bin.local_hash_table_id[tid];
NT *ht_value = bin.local_hash_table_val[tid];
for (int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
__m256i key_m, check_m, mask_m;
int mask;
#endif
int bid = bin.bin_id[i];
if (bid > 0) {
int offset = crpt[i];
int table_size = MIN_HT_N << (bid - 1);
int ht_size = table_size >> VEC_LENGTH_BIT;
for (int j = 0; j < table_size; ++j) {
ht_check[j] = -1;
}
for (int j = arpt[i]; j < arpt[i + 1]; ++j) {
int t_acol = acol[j];
NT t_aval = aval[j];
for (int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
NT t_val = multop(t_aval, bval[k]);
int key = bcol[k];
int hash = (key * HASH_SCAL) & (ht_size - 1);
#ifdef VECTORIZE
key_m = _mm256_set1_epi32(key);
#endif
while (1) {
#ifdef VECTORIZE
check_m = _mm256_maskload_epi32(ht_check + (hash << VEC_LENGTH_BIT), true_m);
mask_m = _mm256_cmpeq_epi32(key_m, check_m);
mask = _mm256_movemask_epi8(mask_m);
if (mask != 0) {
int target = __builtin_ctz(mask) >> 2;
ht_value[(hash << VEC_LENGTH_BIT) + target] += t_val;
break;
}
#else
int flag = -1;
for (int l = 0; l < VEC_LENGTH; ++l) {
if (ht_check[(hash << VEC_LENGTH_BIT) + l] == key) {
flag = l;
}
}
if (flag >= 0) {
ht_value[(hash << VEC_LENGTH_BIT) + flag] += t_val;
break;
}
#endif
else {
int cur_nz;
#ifdef VECTORIZE
mask_m = _mm256_cmpeq_epi32(check_m, init_m);
mask = _mm256_movemask_epi8(mask_m);
cur_nz = (32 - _popcnt32(mask)) >> 2;
#else
cur_nz = VEC_LENGTH;
for (int l = 0; l < VEC_LENGTH; ++l) {
if (ht_check[(hash << VEC_LENGTH_BIT) + l] == -1) {
cur_nz = l;
break;
}
}
#endif
if (cur_nz < VEC_LENGTH) {
ht_check[(hash << VEC_LENGTH_BIT) + cur_nz] = key;
ht_value[(hash << VEC_LENGTH_BIT) + cur_nz] = t_val;
break;
}
else {
hash = (hash + 1) & (ht_size - 1);
}
}
}
}
}
sort_and_store_table2mat<sortOutput, int, NT>(ht_check, ht_value,
ccol + offset, cval + offset,
crpt[i + 1] - offset, ht_size);
}
}
}
}
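/*
* Numeric phase for Hash Vector SpGEMM
* This function is optimized for 64-bit integer with AVX2.
*/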
template <bool sortOutput, typename NT, typename MultiplyOperation, typename AddOperation>
inline void hash_numeric_vec(const long long int *arpt, const long long int *acol, const NT *aval, const long long int *brpt, const long long int *bcol, const NT *bval, const long long int *crpt, long long int *ccol, NT *cval, const BIN<long long int, NT> &bin, MultiplyOperation multop, AddOperation addop)
{
#ifdef VECTORIZE
const __m256i init_m = _mm256_set1_epi64x(-1);
const __m256i true_m = _mm256_set1_epi64x(0xffffffffffffffff);
#endif
#pragma omp parallel
{
long long int tid = omp_get_thread_num();
long long int start_row = bin.rows_offset[tid];
long long int end_row = bin.rows_offset[tid + 1];
long long int *ht_check = bin.local_hash_table_id[tid];
NT *ht_value = bin.local_hash_table_val[tid];
for (long long int i = start_row; i < end_row; ++i) {
#ifdef VECTORIZE
__m256i key_m, check_m, mask_m;
int mask;
#endif
long long int bid = bin.bin_id[i];
if (bid > 0) {
long long int offset = crpt[i];
long long int table_size = MIN_HT_N << (bid - 1);
long long int ht_size = table_size >> VEC_LENGTH_LONG_BIT;
for (long long int j = 0; j < table_size; ++j) {
ht_check[j] = -1;
}
for (long long int j = arpt[i]; j < arpt[i + 1]; ++j) {
long long int t_acol = acol[j];
NT t_aval = aval[j];
for (long long int k = brpt[t_acol]; k < brpt[t_acol + 1]; ++k) {
NT t_val = multop(t_aval, bval[k]);
long long int key = bcol[k];
long long int hash = (key * HASH_SCAL) & (ht_size - 1);
#ifdef VECTORIZE
key_m = _mm256_set1_epi64x(key);
#endif
while (1) {
#ifdef VECTORIZE
check_m = _mm256_maskload_epi64(ht_check + (hash << VEC_LENGTH_LONG_BIT), true_m);
mask_m = _mm256_cmpeq_epi64(key_m, check_m);
mask = _mm256_movemask_epi8(mask_m);
if (mask != 0) {
int target = __builtin_ctz(mask) >> 3;
ht_value[(hash << VEC_LENGTH_LONG_BIT) + target] += t_val;
break;
}
#else
int flag = -1;
for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
if (ht_check[(hash << VEC_LENGTH_LONG_BIT) + l] == key) {
flag = l;
}
}
if (flag >= 0) {
ht_value[(hash << VEC_LENGTH_LONG_BIT) + flag] += t_val;
break;
}
#endif
else {
int cur_nz;
#ifdef VECTORIZE
mask_m = _mm256_cmpeq_epi64(check_m, init_m);
mask = _mm256_movemask_epi8(mask_m);
cur_nz = (32 - _popcnt32(mask)) >> 3;
#else
cur_nz = VEC_LENGTH_LONG;
for (int l = 0; l < VEC_LENGTH_LONG; ++l) {
if (ht_check[(hash << VEC_LENGTH_LONG_BIT) + l] == -1) {
cur_nz = l;
break;
}
}
#endif
if (cur_nz < VEC_LENGTH_LONG) {
ht_check[(hash << VEC_LENGTH_LONG_BIT) + cur_nz] = key;
ht_value[(hash << VEC_LENGTH_LONG_BIT) + cur_nz] = t_val;
break;
}
else {
hash = (hash + 1) & (ht_size - 1);
}
}
}
}
}
sort_and_store_table2mat<sortOutput, long long int, NT>(ht_check, ht_value,
ccol + offset, cval + offset,
crpt[i + 1] - offset, ht_size);
}
}
}
}
#endif
/*
* Executes Hash SpGEMM
* The function first creates the thread-local hash tables, then runs the symbolic phase
* (sizing C) and the numeric phase (filling C) on those tables.
*/
template <bool vectorProbing, bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c, MultiplyOperation multop, AddOperation addop)
{
BIN<IT, NT> bin(a.rows, MIN_HT_S);
c.rows = a.rows;
c.cols = b.cols;
c.zerobased = true;
/* Set max bin */
bin.set_max_bin(a.rowptr, a.colids, b.rowptr, c.rows, c.cols);
/* Create hash table (thread local) */
bin.create_local_hash_table(c.cols);
/* Symbolic Phase */
c.rowptr = my_malloc<IT>(c.rows + 1);
hash_symbolic<vectorProbing>(a.rowptr, a.colids, b.rowptr, b.colids, c.rowptr, bin, c.rows, &(c.nnz));
c.colids = my_malloc<IT>(c.nnz);
c.values = my_malloc<NT>(c.nnz);
/* Numeric Phase */
if (vectorProbing) {
hash_numeric_vec<sortOutput>(a.rowptr, a.colids, a.values, b.rowptr, b.colids, b.values, c.rowptr, c.colids, c.values, bin, multop, addop);
}
else {
hash_numeric<sortOutput>(a.rowptr, a.colids, a.values, b.rowptr, b.colids, b.values, c.rowptr, c.colids, c.values, bin, multop, addop);
}
}
/*
* Hash SpGEMM overloads that default the remaining template parameters
* (vectorProbing = false, and additionally sortOutput = true for the last one)
*/
template <bool sortOutput, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c, MultiplyOperation multop, AddOperation addop)
{
HashSpGEMM<false, sortOutput, IT, NT, MultiplyOperation, AddOperation>(a, b, c, multop, addop);
}
template <typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c, MultiplyOperation multop, AddOperation addop)
{
HashSpGEMM<false, true, IT, NT, MultiplyOperation, AddOperation>(a, b, c, multop, addop);
}
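/*
* Usage sketch (hypothetical caller, not part of the library): given two populated
* CSR matrices a and b, compute c = a * b over the ordinary (+, *) semiring.
* std::multiplies / std::plus come from <functional>; whether that header is
* already included here is an assumption.
*/
#if 0
template <typename IT, typename NT>
void spgemm_example(const CSR<IT, NT> &a, const CSR<IT, NT> &b, CSR<IT, NT> &c)
{
    // scalar probing (vectorProbing = false), sorted output columns
    HashSpGEMM<false, true, IT, NT>(a, b, c, std::multiplies<NT>(), std::plus<NT>());
}
#endif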
|
tests.c | #include "tests.h"
#include "linalg.h"
#include "projector.h"
#include "reader.h"
#include "sbt.h"
#include "utils.h"
#include <assert.h>
#include <complex.h>
#include <math.h>
#include <mkl.h>
#include <mkl_types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.14159265359
double Ylmr(int l, int m, double theta, double phi) {
return creal(Ylm(l, m, theta, phi));
}
double Ylmi(int l, int m, double theta, double phi) {
return cimag(Ylm(l, m, theta, phi));
}
double *get_sbtd_ks(sbt_descriptor_t *d) { return d->ks; }
int fft_check(char *wavecar, double *kpt_weights, int *fftg) {
setbuf(stdout, NULL);
pswf_t *wf = read_wavefunctions(wavecar, kpt_weights);
double complex *x = (double complex *)mkl_calloc(fftg[0] * fftg[1] * fftg[2],
sizeof(double complex), 64);
fft3d(x, wf->G_bounds, wf->lattice, wf->kpts[0]->k, wf->kpts[0]->Gs,
wf->kpts[0]->bands[0]->Cs, wf->kpts[0]->bands[0]->num_waves, fftg);
int *Gs = wf->kpts[0]->Gs;
float complex *Cs = wf->kpts[0]->bands[0]->Cs;
double inv_sqrt_vol = pow(determinant(wf->lattice), -0.5);
double total1 = 0;
double total2 = 0;
double total3 = 0;
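// Cross-check fft3d: evaluate the plane-wave sum directly at every FFT grid
// point and compare it against the transformed values; total1, total2 and
// total3 accumulate the squared norms of the FFT output, the direct sum and
// the coefficients, respectively.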
for (int i = 0; i < fftg[0]; i++) {
for (int j = 0; j < fftg[1]; j++) {
for (int k = 0; k < fftg[2]; k++) {
double f1 = (double)i / fftg[0];
double f2 = (double)j / fftg[1];
double f3 = (double)k / fftg[2];
double complex temp = 0;
for (int w = 0; w < wf->kpts[0]->bands[0]->num_waves; w++) {
temp += Cs[w] * cexp((f1 * (Gs[3 * w]) + f2 * (Gs[3 * w + 1]) +
f3 * (Gs[3 * w + 2])) *
2 * PI * I);
if (i == 0 && j == 0 && k == 0)
total3 += pow(cabs(Cs[w]), 2);
}
temp *= inv_sqrt_vol;
int ind = i * fftg[1] * fftg[2] + j * fftg[2] + k;
total1 += pow(cabs(x[ind]), 2);
total2 += pow(cabs(temp), 2);
if (cabs(x[ind] - temp) > 1e-5)
return -1;
}
}
}
printf("FFTCHECK ASSERTS\n");
float complex *CAs =
(float complex *)calloc(wf->kpts[0]->num_waves, sizeof(float complex));
fwd_fft3d(x, wf->G_bounds, wf->lattice, wf->kpts[0]->k, wf->kpts[0]->Gs, CAs,
wf->kpts[0]->bands[0]->num_waves, fftg);
for (int w = 0; w < wf->kpts[0]->num_waves; w++) {
if (cabs(CAs[w] - wf->kpts[0]->bands[0]->Cs[w]) > 1e-5)
return -2;
}
free(CAs);
mkl_free(x);
return 0;
}
void proj_check(int BAND_NUM, int KPOINT_NUM, pswf_t *wf, int *fftg,
int *labels, double *coords) {
ppot_t *pps = wf->pps;
double complex *x =
mkl_calloc(fftg[0] * fftg[1] * fftg[2], sizeof(double complex), 64);
// printf("START FT\n");
fft3d(x, wf->G_bounds, wf->lattice, wf->kpts[KPOINT_NUM]->k,
wf->kpts[KPOINT_NUM]->Gs, wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->Cs,
wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->num_waves, fftg);
// printf("FINISH FT\n");
double *lattice = wf->lattice;
double vol = determinant(lattice);
double dv = vol / (fftg[0] * fftg[1] * fftg[2]);
for (int i = 0; i < fftg[0]; i++) {
double frac[3] = {0, 0, 0};
double kdotr = 0;
for (int j = 0; j < fftg[1]; j++) {
for (int k = 0; k < fftg[2]; k++) {
frac[0] = (double)i / fftg[0];
frac[1] = (double)j / fftg[1];
frac[2] = (double)k / fftg[2];
kdotr = dot(wf->kpts[KPOINT_NUM]->k, frac);
x[i * fftg[1] * fftg[2] + j * fftg[2] + k] *= cexp(2 * PI * I * kdotr);
}
}
}
double complex *y = (double complex *)malloc(fftg[0] * fftg[1] * fftg[2] *
sizeof(double complex));
memcpy(y, x, fftg[0] * fftg[1] * fftg[2] * sizeof(double complex));
double err = 0, err2 = 0;
double normx = 0, normy = 0;
int num_sites = wf->num_sites;
#pragma omp parallel for
for (int p = 0; p < num_sites; p++) {
int ind;
double serr = 0, serr2 = 0;
double snormx = 0, snormy = 0;
projection_t pros = wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->projections[p];
// printf("READ PROJECTIONS\n");
ppot_t pp = pps[labels[p]];
double rmax = pp.wave_grid[pp.wave_gridsize - 1];
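// Bound the real-space loops: vol / mag(cross(a_j, a_k)) is the spacing of
// lattice planes along direction i, so rmax divided by that spacing (times
// fftg[i]) gives the number of grid points needed to cover the sphere r < rmax.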
double res[3] = {0, 0, 0};
vcross(res, lattice + 3, lattice + 6);
int grid1 = (int)(mag(res) * rmax / vol * fftg[0]) + 1;
vcross(res, lattice + 0, lattice + 6);
int grid2 = (int)(mag(res) * rmax / vol * fftg[1]) + 1;
vcross(res, lattice + 0, lattice + 3);
int grid3 = (int)(mag(res) * rmax / vol * fftg[2]) + 1;
int center1 = (int)round(coords[3 * p + 0] * fftg[0]);
int center2 = (int)round(coords[3 * p + 1] * fftg[1]);
int center3 = (int)round(coords[3 * p + 2] * fftg[2]);
// printf("FINISH SETUP %d\n%d %d %d\n%d %d %d\n",p, center1, center2,
// center3, grid1, grid2, grid3);
for (int i = -grid1 + center1; i <= grid1 + center1; i++) {
double frac[3] = {0, 0, 0};
double testcoord[3] = {0, 0, 0};
int ii = 0, jj = 0, kk = 0;
double phasecoord[3] = {0, 0, 0};
double phase = 0;
for (int j = -grid2 + center2; j <= grid2 + center2; j++) {
for (int k = -grid3 + center3; k <= grid3 + center3; k++) {
testcoord[0] = (double)i / fftg[0] - coords[3 * p + 0];
testcoord[1] = (double)j / fftg[1] - coords[3 * p + 1];
testcoord[2] = (double)k / fftg[2] - coords[3 * p + 2];
frac_to_cartesian(testcoord, lattice);
if (mag(testcoord) < rmax) {
ii = (i % fftg[0] + fftg[0]) % fftg[0];
jj = (j % fftg[1] + fftg[1]) % fftg[1];
kk = (k % fftg[2] + fftg[2]) % fftg[2];
frac[0] = (double)ii / fftg[0];
frac[1] = (double)jj / fftg[1];
frac[2] = (double)kk / fftg[2];
phasecoord[0] = coords[3 * p + 0] + ((ii - i) / fftg[0]);
phasecoord[1] = coords[3 * p + 1] + ((jj - j) / fftg[1]);
phasecoord[2] = coords[3 * p + 2] + ((kk - k) / fftg[2]);
phase = dot(phasecoord, wf->kpts[KPOINT_NUM]->k);
ind = ii * fftg[1] * fftg[2] + jj * fftg[2] + kk;
x[ii * fftg[1] * fftg[2] + jj * fftg[2] + kk] = 0;
for (int n = 0; n < pros.total_projs; n++) {
x[ii * fftg[1] * fftg[2] + jj * fftg[2] + kk] +=
wave_value2(pp.wave_grid, pp.funcs[pros.ns[n]].pswave,
pp.funcs[pros.ns[n]].pswave_spline,
pp.wave_gridsize, pros.ls[n], pros.ms[n],
testcoord) *
pros.overlaps[n] * cexp(2 * PI * I * phase);
}
serr += pow(cabs(x[ind] - y[ind]), 2);
serr2 += pow(cabs(x[ind]) - cabs(y[ind]), 2);
snormx += pow(cabs(x[ind]), 2);
snormy += pow(cabs(y[ind]), 2);
}
}
}
}
#pragma omp critical
{
err += serr;
err2 += serr2;
normx += snormx;
normy += snormy;
}
}
printf("err magerr, normx normy %lf %lf %lf %lf\n", err / normy, err2 / normy,
normx, normy);
mkl_free(x);
free(y);
}
|
clang-262701.c | #include <stdio.h>
#include <string.h>
#include <omp.h>
#define THREADS 2
#define TEAMS 2
int main(){
int gpu_results[THREADS];
int correct_results[THREADS] = {2,2};
#pragma omp target teams thread_limit(THREADS) num_teams(TEAMS) map(from:gpu_results)
{
int dist[THREADS];
// Uncomment line below to trigger generic kernel before fix was in place
//dist[0] = 0;
#pragma omp parallel
{
int thread = omp_get_thread_num();
int team = omp_get_team_num();
dist[thread] = 0;
#pragma omp barrier
dist[thread] += 1;
#pragma omp barrier
if(thread == 0) {
for(int i = 1; i < THREADS; i++)
dist[0] += dist[i];
gpu_results[team] = dist[0];
}
}
}
int status = memcmp(correct_results, gpu_results, THREADS * sizeof(int));
if (status != 0){
printf("FAIL\n");
return 1;
}
printf("PASS\n");
return 0;
}
|
int_array.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "_hypre_utilities.hpp"
/******************************************************************************
*
* Routines for hypre_IntArray struct for holding an array of integers
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* hypre_IntArrayCreate
*--------------------------------------------------------------------------*/
hypre_IntArray *
hypre_IntArrayCreate( HYPRE_Int size )
{
hypre_IntArray *array;
array = hypre_CTAlloc(hypre_IntArray, 1, HYPRE_MEMORY_HOST);
hypre_IntArrayData(array) = NULL;
hypre_IntArraySize(array) = size;
hypre_IntArrayMemoryLocation(array) = hypre_HandleMemoryLocation(hypre_handle());
return array;
}
/*--------------------------------------------------------------------------
* hypre_IntArrayDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_IntArrayDestroy( hypre_IntArray *array )
{
HYPRE_Int ierr = 0;
if (array)
{
HYPRE_MemoryLocation memory_location = hypre_IntArrayMemoryLocation(array);
hypre_TFree(hypre_IntArrayData(array), memory_location);
hypre_TFree(array, HYPRE_MEMORY_HOST);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_IntArrayInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_IntArrayInitialize_v2( hypre_IntArray *array, HYPRE_MemoryLocation memory_location )
{
HYPRE_Int size = hypre_IntArraySize(array);
HYPRE_Int ierr = 0;
hypre_IntArrayMemoryLocation(array) = memory_location;
/* Caveat: for pre-existing data, the memory location must be guaranteed
* to be consistent with `memory_location'
* Otherwise, mismatches will exist and problems will be encountered
* when being used, and freed */
if ( !hypre_IntArrayData(array) )
{
hypre_IntArrayData(array) = hypre_CTAlloc(HYPRE_Int, size, memory_location);
}
return ierr;
}
HYPRE_Int
hypre_IntArrayInitialize( hypre_IntArray *array )
{
HYPRE_Int ierr;
ierr = hypre_IntArrayInitialize_v2( array, hypre_IntArrayMemoryLocation(array) );
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_IntArrayCopy
* copies data from x to y
* if the size of x is larger than the size of y, only the first size(y)
* elements of x are copied to y
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_IntArrayCopy( hypre_IntArray *x,
hypre_IntArray *y )
{
HYPRE_Int ierr = 0;
size_t size = hypre_min( hypre_IntArraySize(x), hypre_IntArraySize(y) );
hypre_TMemcpy( hypre_IntArrayData(y),
hypre_IntArrayData(x),
HYPRE_Int,
size,
hypre_IntArrayMemoryLocation(y),
hypre_IntArrayMemoryLocation(x) );
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_IntArrayCloneDeep
* Returns a complete copy of x - a deep copy, with its own copy of the data.
*--------------------------------------------------------------------------*/
hypre_IntArray *
hypre_IntArrayCloneDeep_v2( hypre_IntArray *x, HYPRE_MemoryLocation memory_location )
{
HYPRE_Int size = hypre_IntArraySize(x);
hypre_IntArray *y = hypre_IntArrayCreate( size );
hypre_IntArrayInitialize_v2(y, memory_location);
hypre_IntArrayCopy( x, y );
return y;
}
hypre_IntArray *
hypre_IntArrayCloneDeep( hypre_IntArray *x )
{
return hypre_IntArrayCloneDeep_v2(x, hypre_IntArrayMemoryLocation(x));
}
/*--------------------------------------------------------------------------
* hypre_IntArraySetConstantValues
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_IntArraySetConstantValues( hypre_IntArray *v,
HYPRE_Int value )
{
HYPRE_Int *array_data = hypre_IntArrayData(v);
HYPRE_Int size = hypre_IntArraySize(v);
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if (size > 0)
{
HYPRE_THRUST_CALL( fill_n, array_data, size, value );
}
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(array_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
array_data[i] = value;
}
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */
#if defined(HYPRE_USING_GPU)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
return ierr;
}
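/*--------------------------------------------------------------------------
* Usage sketch (hypothetical, not part of the library): create a host array,
* fill it with a constant, deep-copy it, and clean up. Error handling is
* elided for brevity.
*--------------------------------------------------------------------------*/
#if 0
void int_array_example( void )
{
   hypre_IntArray *x = hypre_IntArrayCreate(16);
   hypre_IntArrayInitialize_v2(x, HYPRE_MEMORY_HOST); /* allocate the data on the host */
   hypre_IntArraySetConstantValues(x, 42);            /* x[i] = 42 for all i */
   hypre_IntArray *y = hypre_IntArrayCloneDeep(x);    /* deep copy, same memory location */
   hypre_IntArrayDestroy(x);
   hypre_IntArrayDestroy(y);
}
#endif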
|
opmphm.c | /**
* @file
*
* @brief The Order Preserving Minimal Perfect Hash Map C benchmark.
*
* @copyright BSD License (see LICENSE.md or https://www.libelektra.org)
*/
// ==== DEFINE SECTION ====
#define _GNU_SOURCE
#define KDBRAND_BENCHMARK // allows the seed injection into Elektra
// uncomment to use OPENMP and set USE_OPENMP in CMakeLists.txt
//~ #define USE_OPENMP
#ifdef USE_OPENMP
// set your number of threads here
#define NUMBEROFTHREADS 8
#else
#define NUMBEROFTHREADS 1
#endif
// ==== INCLUDE SECTION ====
#include "benchmarks.h"
#ifdef HAVE_HSEARCHR
#include <search.h>
#endif
#ifdef USE_OPENMP
#include <omp.h>
#endif
#include "../src/libs/elektra/opmphm.c"
#include "../src/libs/elektra/opmphmpredictor.c"
#include "../src/libs/elektra/rand.c"
#include <sys/time.h>
int32_t elektraRandBenchmarkInitSeed;
// benchmarks helpers
static int32_t * getRandomSeed (int32_t * seed);
static FILE * openOutFileWithRPartitePostfix (const char * name, uint8_t r);
static const char * getString (void * data);
static size_t getPower (size_t p, size_t q);
static int cmpInteger (const void * a, const void * b);
// generate KeySets
static KeySetShape * getKeySetShapes (void);
const size_t numberOfShapes = 8;
/**
* General structure of a benchmark
*
* `name` is a unique name of the benchmark and `benchmarkF` is the independent function executing the benchmark.
* Execute a benchmark with benchmark_opmphm `name`.
*/
typedef struct
{
char * name;
size_t numberOfSeedsNeeded;
void (*benchmarkF) (char *);
} Benchmark;
/**
* START ======================================= Measures the Opmphm Hash Function time ============================================== START
*
* This benchmark measures the time for hashing a whole KeySet, varying the KeySet size. Executed multiple times.
*
* The output has the following header: n;n;n;n;... (for each KeySetShape)
*
* This benchmark takes numberOfShapes * nCount seeds
*/
static void benchmarkHashFunctionTime (char * name)
{
const size_t nCount = 4;
const size_t n[] = { 10, 100, 1000, 10000 };
const size_t runs = 11;
// init results
size_t * results = elektraMalloc (nCount * numberOfShapes * runs * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
// benchmark
printf ("Run Benchmark %s:\n", name);
KeySetShape * keySetShapes = getKeySetShapes ();
for (size_t i = 0; i < nCount; ++i)
{
for (size_t s = 0; s < numberOfShapes; ++s)
{
printf ("now at n: %zu/%zu shape: %zu/%zu\r", i, nCount, s, numberOfShapes);
fflush (stdout);
int32_t seed;
if (getRandomSeed (&seed) != &seed) printExit ("Seed Parsing Error or feed me more seeds");
KeySet * ks = generateKeySet (n[i], &seed, &keySetShapes[s]);
for (size_t r = 0; r < runs; ++r)
{
Key * key;
ksRewind (ks);
struct timeval start;
struct timeval end;
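// the empty __asm__ statements below act as compiler barriers so that the
// measured loop is not reordered across the gettimeofday () calls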
__asm__("");
gettimeofday (&start, 0);
__asm__("");
// measure
while ((key = ksNext (ks)))
{
__asm__("");
opmphmHashfunction (keyName (key), strlen (keyName (key)), 1337);
__asm__("");
}
__asm__("");
gettimeofday (&end, 0);
__asm__("");
results[i * (numberOfShapes * runs) + s * runs + r] =
(end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
}
ksDel (ks);
}
}
elektraFree (keySetShapes);
// write out results
FILE * out = openOutFileWithRPartitePostfix ("benchmark_opmphm_hashfunctiontime", 0);
if (!out)
{
printExit ("open out file");
}
// print header
for (size_t i = 0; i < nCount; ++i)
{
for (size_t s = 0; s < numberOfShapes; ++s)
{
if (!s && !i)
{
fprintf (out, "%zu", n[i]);
}
else
{
fprintf (out, ";%zu", n[i]);
}
}
}
fprintf (out, "\n");
// print data
for (size_t r = 0; r < runs; ++r)
{
for (size_t i = 0; i < nCount; ++i)
{
for (size_t s = 0; s < numberOfShapes; ++s)
{
if (!s && !i)
{
fprintf (out, "%zu", results[i * (numberOfShapes * runs) + s * runs + r]);
}
else
{
fprintf (out, ";%zu", results[i * (numberOfShapes * runs) + s * runs + r]);
}
}
}
fprintf (out, "\n");
}
fclose (out);
elektraFree (results);
}
/**
* END ========================================= Measures the Opmphm Hash Function time ================================================ END
*/
/**
* START ======================================================= Mapping ============================================================= START
*
* This benchmark counts the opmphmMapping (...) invocations until success, for each KeySet size (n) and space influencing parameter (c).
* First the KeySets are build, for every KeySet size (n) there are numberOfShapes * keySetsPerShape KeySets.
* Then the benchmarking for every KeySet size (n) and space influencing parameter (c) takes place, with a fixed set of seeds for
* the opmphmMapping (...) invocations.
* At the end the results are written out in the following format:
*
* trials;n_%zuc_%f;... (each n and c are unique)
*
* The number of needed seeds for this benchmarks is: nCount * numberOfShapes * keySetsPerShape (KeySets generation) + numberOfSeeds (tested
* seeds)
*/
static void benchmarkMappingCheckOpmphm (Opmphm * opmphm, OpmphmGraph * graph, size_t n, OpmphmInit * init, size_t mappings,
size_t maxMappings)
{
if (n < 5 && mappings != maxMappings)
{
// assign
if (opmphmAssignment (opmphm, graph, n, 1))
{
printExit ("check assignment failed");
}
for (size_t i = 0; i < n; ++i)
{
if (i != opmphmLookup (opmphm, n, init->getName (init->data[i])))
{
printExit ("check assignment failed");
}
}
opmphmClear (opmphm);
}
}
static void benchmarkMapping (char * name)
{
size_t rUniPar = 3;
const size_t nCount = 15;
const size_t n[] = { 10, 15, 20, 30, 40, 60, 80, 120, 160, 240, 320, 480, 640, 960, 1280 }; // 15
const size_t cCount = 15;
const double c[] = { 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 }; // 15
const size_t keySetsPerShape = 20;
const size_t numberOfKeySets = nCount * numberOfShapes * keySetsPerShape;
const size_t numberOfSeeds = 10000;
const size_t maxMappings = 10; // the maximum trials for one opmphmMapping (...) invocation series.
// init seed population, used for opmphmMapping (...) invocation.
int32_t * seeds = elektraMalloc (numberOfSeeds * sizeof (int32_t));
if (!seeds)
{
printExit ("malloc");
}
// get seeds
for (size_t i = 0; i < numberOfSeeds; ++i)
{
if (getRandomSeed (&seeds[i]) != &seeds[i]) printExit ("Seed Parsing Error or feed me more seeds");
}
// init results
size_t * results = elektraMalloc (nCount * cCount * maxMappings * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
memset (results, 0, nCount * cCount * maxMappings * sizeof (size_t));
// Generate all KeySets
KeySetShape * keySetShapes = getKeySetShapes ();
KeySet ** keySetsCache = elektraMalloc (numberOfKeySets * sizeof (KeySet *));
if (!keySetsCache)
{
printExit ("malloc");
}
printf ("KeySet Cache Build:\n");
for (size_t nI = 0; nI < nCount; ++nI)
{
printf ("now at: %zu/%zu\r", nI + 1, nCount);
fflush (stdout);
for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
{
for (size_t ksPshapeI = 0; ksPshapeI < keySetsPerShape; ++ksPshapeI)
{
int32_t genSeed;
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
keySetsCache[nI * (numberOfShapes * keySetsPerShape) + shapeI * keySetsPerShape + ksPshapeI] =
generateKeySet (n[nI], &genSeed, &keySetShapes[shapeI]);
}
}
}
printf ("\nRun Benchmark %s:\n", name);
#ifdef USE_OPENMP
omp_set_num_threads (NUMBEROFTHREADS);
// lock
omp_lock_t writeLock;
omp_init_lock (&writeLock);
#endif
// split
if (numberOfSeeds % NUMBEROFTHREADS != 0) printExit ("seeds % NUMBEROFTHREADS != 0");
size_t partSize = numberOfSeeds / NUMBEROFTHREADS;
// init threads local results
size_t * localResults[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
localResults[i] = elektraMalloc (nCount * cCount * maxMappings * sizeof (size_t));
if (!localResults[i])
{
printExit ("malloc");
}
}
// for all nCount
for (size_t nI = 0; nI < nCount; ++nI)
{
// and cCount
for (size_t cI = 0; cI < cCount; ++cI)
{
printf ("now at: n = %zu/%zu c = %zu/%zu\r", nI + 1, nCount, cI + 1, cCount);
fflush (stdout);
// OPMPHM for all threads
Opmphm * opmphms[NUMBEROFTHREADS];
OpmphmGraph * graphs[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
opmphms[i] = opmphmNew ();
if (!opmphms[i]) printExit ("opmphm");
graphs[i] = opmphmGraphNew (opmphms[i], rUniPar, n[nI], opmphmMinC (rUniPar) + c[cI]);
if (!graphs[i]) printExit ("graph");
}
// OPMPHM
// go through all KeySets from n
for (size_t ksCacheI = 0; ksCacheI < numberOfShapes * keySetsPerShape; ++ksCacheI)
{
KeySet * ks = keySetsCache[nI * (numberOfShapes * keySetsPerShape) + ksCacheI];
#ifdef USE_OPENMP
#pragma omp parallel
#endif
{
size_t threadI = 0;
// OPMPHM
OpmphmInit init;
init.getName = getString;
init.data = (void **) (ks->array);
// OPMPHM
#ifdef USE_OPENMP
threadI = omp_get_thread_num ();
#endif
// reset local result
memset (localResults[threadI], 0, nCount * cCount * maxMappings * sizeof (size_t));
// try each seed part
for (size_t seedI = threadI * partSize; seedI < (threadI + 1) * partSize; ++seedI)
{
size_t mappings = 0; // counts mapping invocations
// OPMPHM
init.initSeed = seeds[seedI];
// fresh OpmphmGraph
opmphmGraphClear (opmphms[threadI], graphs[threadI]);
// do benchmark
int ret;
do
{
ret = opmphmMapping (opmphms[threadI], graphs[threadI], &init, n[nI]);
++mappings;
} while (ret && mappings < maxMappings);
// OPMPHM
if (mappings < 1 || mappings > maxMappings)
{
printExit ("benchmarkSeedRangeMappingCount: mappings out of range");
}
// check opmphm
benchmarkMappingCheckOpmphm (opmphms[threadI], graphs[threadI], n[nI], &init, mappings,
maxMappings);
// save result
// shift, because 0 not used
--mappings;
++localResults[threadI][nI * (cCount * maxMappings) + cI * maxMappings + mappings];
}
#ifdef USE_OPENMP
// write local to global
omp_set_lock (&writeLock);
#endif
for (size_t i = 0; i < nCount * cCount * maxMappings; ++i)
{
results[i] += localResults[threadI][i];
}
#ifdef USE_OPENMP
omp_unset_lock (&writeLock);
#endif
}
}
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
// OPMPHM
opmphmDel (opmphms[i]);
opmphmGraphDel (graphs[i]);
// OPMPHM
}
}
// end for all nCount
}
#ifdef USE_OPENMP
omp_destroy_lock (&writeLock);
#endif
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
free (localResults[i]);
}
printf ("\n");
/*
* results sanity check
*
* each n and c combination should have (numberOfShapes * keySetsPerShape) * (numberOfSeeds) trials in sum
*/
for (size_t nI = 0; nI < nCount; ++nI)
{
for (size_t cI = 0; cI < cCount; ++cI)
{
size_t sum = 0;
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
sum += results[nI * (cCount * maxMappings) + cI * maxMappings + mappingI];
}
if (sum != numberOfShapes * keySetsPerShape * numberOfSeeds)
{
printExit ("benchmarkSeedRangeMappingCount: results sanity check failed");
}
}
}
// write out
FILE * out = openOutFileWithRPartitePostfix ("benchmark_opmphm_mapping", rUniPar);
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "trials");
for (size_t nI = 0; nI < nCount; ++nI)
{
for (size_t cI = 0; cI < cCount; ++cI)
{
fprintf (out, ";n_%zuc_%f", n[nI], opmphmMinC (rUniPar) + c[cI]);
}
}
fprintf (out, "\n");
// print data
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
fprintf (out, "%zu", mappingI + 1); // unshift, because 0 is not a result
for (size_t nI = 0; nI < nCount; ++nI)
{
for (size_t cI = 0; cI < cCount; ++cI)
{
fprintf (out, ";%zu", results[nI * (cCount * maxMappings) + cI * maxMappings + mappingI]);
}
}
fprintf (out, "\n");
}
// cleanup
for (size_t i = 0; i < numberOfKeySets; ++i)
{
ksDel (keySetsCache[i]);
}
elektraFree (keySetsCache);
fclose (out);
elektraFree (keySetShapes);
elektraFree (seeds);
elektraFree (results);
}
/**
* END ========================================================= Mapping =============================================================== END
*/
/**
* START ============================================== Mapping with Optimization ==================================================== START
*
* This benchmark counts the opmphmMapping (...) invocations until success, for each KeySet size.
* First the KeySets are build, for every KeySet size (n) there are numberOfShapes * keySetsPerShape KeySets.
* Then the benchmarking for every KeySet size (n) takes place, with a fixed set of seeds for the opmphmMapping (...) invocations.
* At the end the results are written out in the following format:
*
* trials;n_%zur_%uc_%f;... (each n is unique)
*
* The number of needed seeds for this benchmarks is: nCount * numberOfShapes * keySetsPerShape (KeySets generation) + numberOfSeeds (tested
* seeds)
*/
static void benchmarkMappingOpt (char * name)
{
// create the n array
const size_t nCount = 132;
size_t * n = elektraMalloc (nCount * sizeof (size_t));
if (!n)
{
printExit ("malloc");
}
size_t controlCount = 0;
for (size_t i = 2; i <= 38; ++i)
{
n[controlCount] = i;
++controlCount;
}
for (size_t i = 39; i <= 239; i = i + 5)
{
n[controlCount] = i;
++controlCount;
}
n[controlCount] = 240;
++controlCount;
for (size_t i = 259; i <= 1279; i = i + 20)
{
n[controlCount] = i;
++controlCount;
}
n[controlCount] = 1280;
++controlCount;
if (controlCount != nCount)
{
printExit ("controlCount != nCount");
}
const size_t keySetsPerShape = 70;
const size_t numberOfKeySets = nCount * numberOfShapes * keySetsPerShape;
const size_t numberOfSeeds = 20000;
const size_t maxMappings = 10; // the maximum trials for one opmphmMapping (...) invocation series.
// init seed population, used for opmphmMapping (...) invocation.
int32_t * seeds = elektraMalloc (numberOfSeeds * sizeof (int32_t));
if (!seeds)
{
printExit ("malloc");
}
// get seeds
for (size_t i = 0; i < numberOfSeeds; ++i)
{
if (getRandomSeed (&seeds[i]) != &seeds[i]) printExit ("Seed Parsing Error or feed me more seeds");
}
// init results
size_t * results = elektraMalloc (nCount * maxMappings * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
memset (results, 0, nCount * maxMappings * sizeof (size_t));
// Generate all KeySets
KeySetShape * keySetShapes = getKeySetShapes ();
KeySet ** keySetsCache = elektraMalloc (numberOfKeySets * sizeof (KeySet *));
if (!keySetsCache)
{
printExit ("malloc");
}
printf ("KeySet Cache Build:\n");
for (size_t nI = 0; nI < nCount; ++nI)
{
printf ("now at: %zu/%zu\r", nI + 1, nCount);
fflush (stdout);
for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
{
for (size_t ksPshapeI = 0; ksPshapeI < keySetsPerShape; ++ksPshapeI)
{
int32_t genSeed;
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
keySetsCache[nI * (numberOfShapes * keySetsPerShape) + shapeI * keySetsPerShape + ksPshapeI] =
generateKeySet (n[nI], &genSeed, &keySetShapes[shapeI]);
}
}
}
printf ("\nRun Benchmark %s:\n", name);
#ifdef USE_OPENMP
omp_set_num_threads (NUMBEROFTHREADS);
// lock
omp_lock_t writeLock;
omp_init_lock (&writeLock);
#endif
// split
if (numberOfSeeds % NUMBEROFTHREADS != 0) printExit ("seeds % NUMBEROFTHREADS != 0");
size_t partSize = numberOfSeeds / NUMBEROFTHREADS;
// init threads local results
size_t * localResults[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
localResults[i] = elektraMalloc (nCount * maxMappings * sizeof (size_t));
if (!localResults[i])
{
printExit ("malloc");
}
}
// for all nCount
for (size_t nI = 0; nI < nCount; ++nI)
{
printf ("now at: n = %zu/%zu\r", nI + 1, nCount);
fflush (stdout);
// OPMPHM for all threads
Opmphm * opmphms[NUMBEROFTHREADS];
OpmphmGraph * graphs[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
opmphms[i] = opmphmNew ();
if (!opmphms[i]) printExit ("opmphm");
uint8_t r = opmphmOptR (n[nI]);
graphs[i] = opmphmGraphNew (opmphms[i], r, n[nI], opmphmMinC (r) + opmphmOptC (n[nI]));
if (!graphs[i]) printExit ("graph");
}
// OPMPHM
// go through all KeySets from n
for (size_t ksCacheI = 0; ksCacheI < numberOfShapes * keySetsPerShape; ++ksCacheI)
{
KeySet * ks = keySetsCache[nI * (numberOfShapes * keySetsPerShape) + ksCacheI];
#ifdef USE_OPENMP
#pragma omp parallel
#endif
{
size_t threadI = 0;
// OPMPHM
OpmphmInit init;
init.getName = getString;
init.data = (void **) (ks->array);
// OPMPHM
#ifdef USE_OPENMP
threadI = omp_get_thread_num ();
#endif
// reset local result
memset (localResults[threadI], 0, nCount * maxMappings * sizeof (size_t));
// try each seed part
for (size_t seedI = threadI * partSize; seedI < (threadI + 1) * partSize; ++seedI)
{
size_t mappings = 0; // counts mapping invocations
// OPMPHM
init.initSeed = seeds[seedI];
// fresh OpmphmGraph
opmphmGraphClear (opmphms[threadI], graphs[threadI]);
// do benchmark
int ret;
do
{
ret = opmphmMapping (opmphms[threadI], graphs[threadI], &init, n[nI]);
++mappings;
} while (ret && mappings < maxMappings);
// OPMPHM
if (mappings < 1 || mappings > maxMappings)
{
printExit ("benchmarkSeedRangeMappingCount: mappings out of range");
}
// check assignment
if (nI < 5 && mappings != maxMappings)
{
// assign
if (opmphmAssignment (opmphms[threadI], graphs[threadI], n[nI], 1))
{
printExit ("check assignment failed");
}
for (size_t i = 0; i < n[nI]; ++i)
{
if (i != opmphmLookup (opmphms[threadI], n[nI], init.getName (init.data[i])))
{
printExit ("check assignment failed");
}
}
opmphmClear (opmphms[threadI]);
}
// save result
// shift, because 0 not used
--mappings;
++localResults[threadI][nI * maxMappings + mappings];
}
#ifdef USE_OPENMP
// write local to global
omp_set_lock (&writeLock);
#endif
for (size_t i = 0; i < nCount * maxMappings; ++i)
{
results[i] += localResults[threadI][i];
}
#ifdef USE_OPENMP
omp_unset_lock (&writeLock);
#endif
}
}
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
// OPMPHM
opmphmDel (opmphms[i]);
opmphmGraphDel (graphs[i]);
// OPMPHM
}
// end for all nCount
}
#ifdef USE_OPENMP
omp_destroy_lock (&writeLock);
#endif
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
free (localResults[i]);
}
printf ("\n");
/*
* results sanity check
*
* each n should have (numberOfShapes * keySetsPerShape) * (numberOfSeeds) trials in sum
*/
for (size_t nI = 0; nI < nCount; ++nI)
{
size_t sum = 0;
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
sum += results[nI * maxMappings + mappingI];
}
if (sum != numberOfShapes * keySetsPerShape * numberOfSeeds)
{
printExit ("benchmarkSeedRangeMappingCount: results sanity check failed");
}
}
// write out
FILE * out = fopen ("benchmark_opmphm_mapping_opt.csv", "w");
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "trials");
for (size_t nI = 0; nI < nCount; ++nI)
{
fprintf (out, ";n_%zur_%uc_%f", n[nI], opmphmOptR (n[nI]), opmphmMinC (opmphmOptR (n[nI])) + opmphmOptC (n[nI]));
}
fprintf (out, "\n");
// print data
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
fprintf (out, "%zu", mappingI + 1); // unshift, because 0 is not a result
for (size_t nI = 0; nI < nCount; ++nI)
{
fprintf (out, ";%zu", results[nI * maxMappings + mappingI]);
}
fprintf (out, "\n");
}
// cleanup
for (size_t i = 0; i < numberOfKeySets; ++i)
{
ksDel (keySetsCache[i]);
}
elektraFree (n);
elektraFree (keySetsCache);
fclose (out);
elektraFree (keySetShapes);
elektraFree (seeds);
elektraFree (results);
}
/**
* END ================================================ Mapping with Optimization ====================================================== END
*/
/**
* START ================================================== Mapping All Seeds ======================================================== START
*
* This benchmark counts the opmphmMapping (...) invocations until success, for each KeySet size and all seeds.
* First the KeySets are build, for every KeySet size (n). Then the benchmarking for every KeySet size (n) takes place,
* the seeds start at 1 and go to ELEKTRARANDMAX - 1 = 2147483646.
* At the end the results are written out in the following format:
*
* trials;n_%zur_%uc_%f;... (each n is unique)
*
* The number of needed seeds for this benchmarks is: nCount (KeySets generation)
*/
static void benchmarkMappingAllSeeds (char * name)
{
// create the n array
const size_t nCount = 7;
size_t * n = elektraMalloc (nCount * sizeof (size_t));
if (!n)
{
printExit ("malloc");
}
n[0] = 9;
n[1] = 29;
n[2] = 49;
n[3] = 69;
n[4] = 89;
n[5] = 109;
n[6] = 129;
// seeds limits
const int32_t startSeed = 1;
const int32_t endSeed = ELEKTRARANDMAX - 1; // = 2147483646
const size_t maxMappings = 10; // the maximum trials for one opmphmMapping (...) invocation series.
// init results
size_t * results = elektraMalloc (nCount * maxMappings * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
memset (results, 0, nCount * maxMappings * sizeof (size_t));
// Generate all KeySets
KeySetShape * keySetShapes = getKeySetShapes ();
KeySet ** keySetsCache = elektraMalloc (nCount * sizeof (KeySet *));
if (!keySetsCache)
{
printExit ("malloc");
}
for (size_t nI = 0; nI < nCount; ++nI)
{
int32_t genSeed;
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
keySetsCache[nI] = generateKeySet (n[nI], &genSeed, &keySetShapes[0]); // shape 0 is shapefConstBinary with 0 parents
}
printf ("\nRun Benchmark %s:\n", name);
#ifdef USE_OPENMP
omp_set_num_threads (NUMBEROFTHREADS);
// lock
omp_lock_t writeLock;
omp_init_lock (&writeLock);
#endif
// split the job
int32_t partIntervals[NUMBEROFTHREADS * 2];
int32_t onePart = (endSeed - startSeed) / NUMBEROFTHREADS;
int32_t iterateIntervals = startSeed;
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
if (i == NUMBEROFTHREADS - 1)
{
// give last thread the remaining seeds
partIntervals[i * 2] = iterateIntervals;
partIntervals[(i * 2) + 1] = endSeed;
}
else
{
partIntervals[i * 2] = iterateIntervals;
partIntervals[(i * 2) + 1] = iterateIntervals + onePart - 1;
iterateIntervals += onePart;
}
}
// init threads local results
size_t * localResults[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
localResults[i] = elektraMalloc (nCount * maxMappings * sizeof (size_t));
if (!localResults[i])
{
printExit ("malloc");
}
}
// for all nCount
for (size_t nI = 0; nI < nCount; ++nI)
{
// OPMPHM for all threads
Opmphm * opmphms[NUMBEROFTHREADS];
OpmphmGraph * graphs[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
opmphms[i] = opmphmNew ();
if (!opmphms[i]) printExit ("opmphm");
uint8_t r = opmphmOptR (n[nI]);
graphs[i] = opmphmGraphNew (opmphms[i], r, n[nI], opmphmMinC (r) + opmphmOptC (n[nI]));
if (!graphs[i]) printExit ("graph");
}
// OPMPHM
KeySet * ks = keySetsCache[nI];
#ifdef USE_OPENMP
#pragma omp parallel
#endif
{
size_t threadI = 0;
// OPMPHM
OpmphmInit init;
init.getName = getString;
init.data = (void **) (ks->array);
// OPMPHM
#ifdef USE_OPENMP
threadI = omp_get_thread_num ();
#endif
// reset local result
memset (localResults[threadI], 0, nCount * maxMappings * sizeof (size_t));
// try each seed part
for (int32_t seed = partIntervals[threadI * 2];
partIntervals[threadI * 2] <= seed && seed <= partIntervals[(threadI * 2) + 1]; ++seed)
{
if (threadI == 0 && (seed % 1000) == 0)
{
printf ("now at: n = %zu/%zu and seed %i from %i\r", nI + 1, nCount, seed, partIntervals[1]);
fflush (stdout);
}
size_t mappings = 0; // counts mapping invocations
// OPMPHM
init.initSeed = seed;
// fresh OpmphmGraph
opmphmGraphClear (opmphms[threadI], graphs[threadI]);
// do benchmark
int ret;
do
{
ret = opmphmMapping (opmphms[threadI], graphs[threadI], &init, n[nI]);
++mappings;
} while (ret && mappings < maxMappings);
// OPMPHM
if (mappings < 1 || mappings > maxMappings)
{
printExit ("benchmarkSeedRangeMappingCount: mappings out of range");
}
// save result
// shift, because 0 not used
--mappings;
++localResults[threadI][nI * maxMappings + mappings];
}
#ifdef USE_OPENMP
// write local to global
omp_set_lock (&writeLock);
#endif
for (size_t i = 0; i < nCount * maxMappings; ++i)
{
results[i] += localResults[threadI][i];
}
#ifdef USE_OPENMP
omp_unset_lock (&writeLock);
#endif
}
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
// OPMPHM
opmphmDel (opmphms[i]);
opmphmGraphDel (graphs[i]);
// OPMPHM
}
// end for all nCount
}
#ifdef USE_OPENMP
omp_destroy_lock (&writeLock);
#endif
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
free (localResults[i]);
}
printf ("\n");
/*
* results sanity check
*
* each n should have in sum endSeed - startSeed + 1 trials
*/
for (size_t nI = 0; nI < nCount; ++nI)
{
size_t sum = 0;
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
sum += results[nI * maxMappings + mappingI];
}
if (sum != (size_t) endSeed - startSeed + 1)
{
printExit ("benchmarkSeedRangeMappingCount: results sanity check failed");
}
}
// write out
FILE * out = fopen ("benchmark_opmphm_mapping_allSeeds.csv", "w");
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "trials");
for (size_t nI = 0; nI < nCount; ++nI)
{
fprintf (out, ";n_%zur_%uc_%f", n[nI], opmphmOptR (n[nI]), opmphmMinC (opmphmOptR (n[nI])) + opmphmOptC (n[nI]));
}
fprintf (out, "\n");
// print data
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
fprintf (out, "%zu", mappingI + 1); // unshift, because 0 is not a result
for (size_t nI = 0; nI < nCount; ++nI)
{
fprintf (out, ";%zu", results[nI * maxMappings + mappingI]);
}
fprintf (out, "\n");
}
// cleanup
for (size_t i = 0; i < nCount; ++i)
{
ksDel (keySetsCache[i]);
}
elektraFree (n);
elektraFree (keySetsCache);
fclose (out);
elektraFree (keySetShapes);
elektraFree (results);
}
/**
* END ==================================================== Mapping All Seeds ========================================================== END
*/
/**
* START ================================================== OPMPHM Build Time ======================================================== START
*
* This benchmark measures the time of the OPMPHM build.
* Uses all KeySet shapes except 6, for all n (KeySet size) a fixed set of seeds is used to build the OPMPHM.
* The keyset shape 6 is excluded, because previous evaluation had shown that the results with that keyset shape
* were unusable, due to the unnaturally long key names.
* For one n (KeySet size) ksPerN KeySets are used.
* The results are written out in the following format:
*
* n;ks;time
*
* The number of needed seeds for this benchmarks is: (numberOfShapes - 1) * ( numberOfSeeds + nCount * ksPerN )
*/
/**
* @brief Measures the OPMPHM build numberOfRepeats times and returns the median
*
* @param ks the KeySet
* @param repeats array to store repeated measurements
* @param numberOfRepeats fields in repeats
*
* @retval median time
*/
static size_t benchmarkOPMPHMBuildTimeMeasure (KeySet * ks, size_t * repeats, size_t numberOfRepeats)
{
for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
{
// preparation for measurement
struct timeval start;
struct timeval end;
Key * keySearchFor = ks->array[0]; // just some key
Key * keyFound;
// fresh OPMPHM
if (ks->opmphm)
{
opmphmDel (ks->opmphm);
ks->opmphm = NULL;
}
// START MEASUREMENT
__asm__("");
gettimeofday (&start, 0);
__asm__("");
keyFound = ksLookup (ks, keySearchFor, KDB_O_OPMPHM | KDB_O_NOCASCADING);
__asm__("");
gettimeofday (&end, 0);
__asm__("");
// END MEASUREMENT
// save result
repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
// sanity check
if (!opmphmIsBuild (ks->opmphm))
{
printExit ("Sanity Check Failed: OPMPHM not used");
}
if (keyFound != keySearchFor)
{
printExit ("Sanity Check Failed: found wrong Key");
}
}
// sort repeats
qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
return repeats[numberOfRepeats / 2]; // take median
}
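/*
* cmpInteger (declared above) is the qsort comparator over the size_t
* measurements; its definition lives elsewhere in this file. A minimal sketch
* of such a comparator (hypothetical name, shown for illustration only):
*/
#if 0
static int cmpIntegerSketch (const void * a, const void * b)
{
	const size_t x = *(const size_t *) a;
	const size_t y = *(const size_t *) b;
	return (x > y) - (x < y); // avoids overflow from plain subtraction
}
#endif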
void benchmarkOPMPHMBuildTime (char * name)
{
const size_t startN = 50;
const size_t stepN = 500;
const size_t endN = 20000;
const size_t ksPerN = 5;
const size_t numberOfSeeds = 51;
const size_t numberOfRepeats = 7;
// check config
if (startN >= endN || startN == 0)
{
printExit ("startN >= endN || startN == 0");
}
if (numberOfRepeats % 2 == 0)
{
printExit ("numberOfRepeats is even");
}
if (numberOfSeeds % 2 == 0)
{
printExit ("numberOfSeeds is even");
}
if (ksPerN % 2 == 0)
{
printExit ("ksPerN is even");
}
// calculate counts
size_t nCount = 0;
for (size_t nI = startN; nI <= endN; nI += stepN)
{
++nCount;
}
// memory allocation and initialization
// init seeds for mapping step in ksLookup (...)
int32_t * seeds = elektraMalloc (numberOfSeeds * sizeof (int32_t));
if (!seeds)
{
printExit ("malloc");
}
// init results
size_t * results = elektraMalloc (nCount * ksPerN * numberOfSeeds * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
// init repeats
size_t * repeats = elektraMalloc (numberOfRepeats * sizeof (size_t));
if (!repeats)
{
printExit ("malloc");
}
// init KeySetStorage
KeySet ** keySetStorage = elektraMalloc (ksPerN * sizeof (KeySet *));
if (!keySetStorage)
{
printExit ("malloc");
}
// get KeySet shapes
KeySetShape * keySetShapes = getKeySetShapes ();
printf ("Run Benchmark %s:\n", name);
// for all KeySet shapes except 6
for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
{
if (shapeI == 6)
{
continue;
}
// get seeds for mapping step in ksLookup (...)
for (size_t i = 0; i < numberOfSeeds; ++i)
{
if (getRandomSeed (&seeds[i]) != &seeds[i]) printExit ("Seed Parsing Error or feed me more seeds");
}
KeySetShape * usedKeySetShape = &keySetShapes[shapeI];
// for all Ns
for (size_t nI = startN; nI <= endN; nI += stepN)
{
printf ("now at: shape = %zu/%zu n = %zu/%zu\r", shapeI + 1, numberOfShapes, nI, endN);
fflush (stdout);
// generate KeySets
int32_t genSeed;
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
keySetStorage[ksI] = generateKeySet (nI, &genSeed, usedKeySetShape);
}
// for all seeds
for (size_t seedI = 0; seedI < numberOfSeeds; ++seedI)
{
// set seed to return by elektraRandGetInitSeed () in the lookup
elektraRandBenchmarkInitSeed = seeds[seedI];
// for all KeySets in the storage
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
// measure
size_t res = benchmarkOPMPHMBuildTimeMeasure (keySetStorage[ksI], repeats, numberOfRepeats);
// store res
results[((nI - startN) / stepN) * ksPerN * numberOfSeeds + ksI * numberOfSeeds + seedI] = res;
}
}
// free ks
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
ksDel (keySetStorage[ksI]);
}
}
// write out
FILE * out = openOutFileWithRPartitePostfix ("benchmark_opmphm_build_time", shapeI);
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "n;ks;time\n");
// print data
for (size_t nI = startN; nI <= endN; nI += stepN)
{
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
for (size_t seedI = 0; seedI < numberOfSeeds; ++seedI)
{
fprintf (out, "%zu;%zu;%zu\n", nI, ksI,
results[((nI - startN) / stepN) * ksPerN * numberOfSeeds + ksI * numberOfSeeds + seedI]);
}
}
}
fclose (out);
}
printf ("\n");
elektraFree (repeats);
elektraFree (keySetStorage);
elektraFree (keySetShapes);
elektraFree (results);
elektraFree (seeds);
}
/**
* END ==================================================== OPMPHM Build Time ========================================================== END
*/
/**
* START ================================================== OPMPHM Search Time ======================================================= START
*
* This benchmark measures the time of the OPMPHM search.
* It uses all KeySet shapes except 6; for one n (KeySet size) ksPerN KeySets are used.
* KeySet shape 6 is excluded, because previous evaluations had shown that the results with that shape
* were unusable, due to the unnaturally long key names.
* Each measurement done with one KeySet is repeated numberOfRepeats times and summarized with the median.
* For one n (KeySet size) the ksPerN results are also summarized with the median.
* The results are written out in the following format:
*
* n;search_1;search_2;...;search_(numberOfSearches)
*
* The number of needed seeds for this benchmark is: (numberOfShapes - 1) * nCount * ksPerN * (1 + searchesCount)
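* With the values used here (numberOfShapes = 8, nCount = 40, ksPerN = 3, searchesCount = 64) that is 7 * 40 * 3 * 65 = 54600.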
*/
/**
* @brief Measures the OPMPHM search time for `searches` random Keys, repeats the measurement numberOfRepeats times and returns the median.
*
* The OPMPHM build will be triggered if KDB_O_OPMPHM is set!
*
* @param ks the KeySet
* @param searches the number of searches to make
* @param searchSeed the random seed used to determine the Keys to search
* @param option the options passed to the ksLookup (...)
* @param repeats array to store repeated measurements
* @param numberOfRepeats fields in repeats
*
* @retval median time
*/
static size_t benchmarkSearchTimeMeasure (KeySet * ks, size_t searches, int32_t searchSeed, elektraLookupFlags option, size_t * repeats,
size_t numberOfRepeats)
{
if (option & KDB_O_OPMPHM)
{
// trigger OPMPHM build if not build
if (!opmphmIsBuild (ks->opmphm))
{
// set seed to return by elektraRandGetInitSeed () in the lookup
elektraRandBenchmarkInitSeed = searchSeed;
(void) ksLookup (ks, ks->array[0], KDB_O_OPMPHM | KDB_O_NOCASCADING);
if (!opmphmIsBuild (ks->opmphm))
{
printExit ("trigger OPMPHM build");
}
}
}
for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
{
// sanity checks
if (option & KDB_O_OPMPHM)
{
if (!opmphmIsBuild (ks->opmphm))
{
printExit ("Sanity Check Failed: OPMPHM not here");
}
}
else
{
if (ks->opmphm)
{
printExit ("Sanity Check Failed: OPMPHM here");
}
}
// preparation for measurement
struct timeval start;
struct timeval end;
Key * keyFound;
int32_t actualSearchSeed = searchSeed;
// START MEASUREMENT
__asm__("");
gettimeofday (&start, 0);
__asm__("");
for (size_t s = 1; s <= searches; ++s)
{
keyFound = ksLookup (ks, ks->array[actualSearchSeed % ks->size], option);
if (!keyFound || keyFound != ks->array[actualSearchSeed % ks->size])
{
printExit ("Sanity Check Failed: found wrong Key");
}
elektraRand (&actualSearchSeed);
}
__asm__("");
gettimeofday (&end, 0);
__asm__("");
// END MEASUREMENT
// sanity checks
if (option & KDB_O_OPMPHM)
{
if (!opmphmIsBuild (ks->opmphm))
{
printExit ("Sanity Check Failed: OPMPHM not here");
}
}
else
{
if (ks->opmphm)
{
printExit ("Sanity Check Failed: OPMPHM here");
}
}
// save result
repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
}
// sort repeats
qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
return repeats[numberOfRepeats / 2]; // take median
}
/**
* @brief Common part of search time benchmarks, used by benchmarkOPMPHMSearchTime and benchmarkBinarySearchTime.
*
* @param name the name of the benchmark
* @param outFileName the output file name
* @param option the option to pass to the ksLookup (...)
*/
static void benchmarkSearchTime (char * name, char * outFileName, elektraLookupFlags option)
{
const size_t startN = 50;
const size_t stepN = 500;
const size_t endN = 20000;
const size_t ksPerN = 3;
const size_t numberOfRepeats = 7;
const size_t startSearches = 500;
const size_t stepSearches = 500;
const size_t endSearches = 32000;
// check config
if (startN >= endN || startN == 0)
{
printExit ("startN >= endN || startN == 0");
}
if (numberOfRepeats % 2 == 0)
{
printExit ("numberOfRepeats is even");
}
if (ksPerN % 2 == 0)
{
printExit ("ksPerN is even");
}
// calculate counts
size_t nCount = 0;
for (size_t nI = startN; nI <= endN; nI += stepN)
{
++nCount;
}
size_t searchesCount = 0;
for (size_t searchesI = startSearches; searchesI <= endSearches; searchesI += stepSearches)
{
++searchesCount;
}
// memory allocation and initialization
// init results
size_t * results = elektraMalloc (nCount * searchesCount * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
// init repeats
size_t * repeats = elektraMalloc (numberOfRepeats * sizeof (size_t));
if (!repeats)
{
printExit ("malloc");
}
// init partialResult
size_t * partialResult = elektraMalloc (ksPerN * searchesCount * sizeof (size_t));
if (!partialResult)
{
printExit ("malloc");
}
// init KeySetStorage
KeySet ** keySetStorage = elektraMalloc (ksPerN * sizeof (KeySet *));
if (!keySetStorage)
{
printExit ("malloc");
}
// get KeySet shapes
KeySetShape * keySetShapes = getKeySetShapes ();
printf ("Run Benchmark %s:\n", name);
// for all KeySet shapes except 6
for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
{
if (shapeI == 6)
{
continue;
}
KeySetShape * usedKeySetShape = &keySetShapes[shapeI];
// for all Ns
for (size_t nI = startN; nI <= endN; nI += stepN)
{
printf ("now at: shape = %zu/%zu n = %zu/%zu\r", shapeI + 1, numberOfShapes, nI, endN);
fflush (stdout);
// generate KeySets
int32_t genSeed;
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
keySetStorage[ksI] = generateKeySet (nI, &genSeed, usedKeySetShape);
}
// for all number of searches
for (size_t searchesI = startSearches; searchesI <= endSearches; searchesI += stepSearches)
{
int32_t searchSeed = 1;
if (getRandomSeed (&searchSeed) != &searchSeed) printExit ("Seed Parsing Error or feed me more seeds");
// for all KeySets in the storage
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
// measure
size_t res = benchmarkSearchTimeMeasure (keySetStorage[ksI], searchesI, searchSeed, option, repeats,
numberOfRepeats);
// save partial result to summarize it with median
partialResult[((searchesI - startSearches) / stepSearches) * ksPerN + ksI] = res;
}
}
// sort partialResult and take median as final result
for (size_t searchesI = 0; searchesI < searchesCount; ++searchesI)
{
qsort (&partialResult[searchesI * ksPerN], ksPerN, sizeof (size_t), cmpInteger);
results[((nI - startN) / stepN) * searchesCount + searchesI] =
partialResult[searchesI * ksPerN + (ksPerN / 2)];
}
// free ks
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
ksDel (keySetStorage[ksI]);
}
}
// write out
FILE * out = openOutFileWithRPartitePostfix (outFileName, shapeI);
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "n");
for (size_t searchesI = startSearches; searchesI <= endSearches; searchesI += stepSearches)
{
fprintf (out, ";search_%zu", searchesI);
}
fprintf (out, "\n");
// print data
for (size_t nI = startN; nI <= endN; nI += stepN)
{
fprintf (out, "%zu", nI);
for (size_t searchesI = startSearches; searchesI <= endSearches; searchesI += stepSearches)
{
fprintf (out, ";%zu",
results[((nI - startN) / stepN) * searchesCount + ((searchesI - startSearches) / stepSearches)]);
}
fprintf (out, "\n");
}
fclose (out);
}
printf ("\n");
elektraFree (repeats);
elektraFree (partialResult);
elektraFree (keySetStorage);
elektraFree (keySetShapes);
elektraFree (results);
}
void benchmarkOPMPHMSearchTime (char * name)
{
benchmarkSearchTime (name, "benchmark_opmphm_search_time", KDB_O_OPMPHM | KDB_O_NOCASCADING);
}
/**
* END ==================================================== OPMPHM Search Time ========================================================= END
*/
/**
* START ================================================= Binary search Time ======================================================== START
*
* This benchmark measures the time of the binary search.
* It uses all KeySet shapes except 6; for one n (KeySet size) ksPerN KeySets are used.
* KeySet shape 6 is excluded, because previous evaluations had shown that the results with that shape
* were unusable, due to the unnaturally long key names.
* Each measurement done with one KeySet is repeated numberOfRepeats times and summarized with the median.
* For one n (KeySet size) the ksPerN results are also summarized with the median.
* The results are written out in the following format:
*
* n;search_1;search_2;...;search_(numberOfSearches)
*
* The number of needed seeds for this benchmark is: (numberOfShapes - 1) * nCount * ksPerN * (1 + searchesCount)
*/
static void benchmarkBinarySearchTime (char * name)
{
benchmarkSearchTime (name, "benchmark_binary_search_time", KDB_O_NOCASCADING);
}
/**
* END =================================================== Binary search Time ========================================================== END
*/
/**
* START ================================================= hsearch Build Time ======================================================== START
*
* This benchmark measures the time of the hsearch build.
* For one n (KeySet size) ksPerN KeySets are used, with different loads.
* This benchmark has a 10-strike policy: when the measured time exceeds 10000 ten times in a row, the current KeySet shape is abandoned and the next one is handled.
* The results are written out in the following format:
*
* n;ks;load;time
*
* The number of needed seeds for this benchmark is: (numberOfShapes - 1) * nCount * ksPerN
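* With the values used here (numberOfShapes = 8, nCount = 40, ksPerN = 5) that is 7 * 40 * 5 = 1400.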
*/
// clang-format off
// format bug
#ifdef HAVE_HSEARCHR
// clang-format on
/**
* @brief Measures the hsearch build numberOfRepeats times and returns the median
*
* @param ks the KeySet
* @param nI the KeySet size
* @param load the load
* @param repeats array to store repeated measurements
* @param numberOfRepeats fields in repeats
*
* @retval median time
*/
static size_t benchmarkHsearchBuildTimeMeasure (KeySet * ks, size_t nI, double load, size_t * repeats, size_t numberOfRepeats)
{
for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
{
// preparation for measurement
struct timeval start;
struct timeval end;
Key * key;
ksRewind (ks);
ENTRY e;
ENTRY * ep;
// fresh htab
struct hsearch_data * htab = elektraCalloc (sizeof (struct hsearch_data));
if (!htab)
{
printExit ("calloc");
}
// START MEASUREMENT
__asm__("");
gettimeofday (&start, 0);
__asm__("");
if (!hcreate_r (nI / load, htab))
{
printExit ("hcreate_r");
}
while ((key = ksNext (ks)))
{
e.key = (char *) keyName (key);
e.data = key;
if (!hsearch_r (e, ENTER, &ep, htab))
{
printExit ("hsearch_r");
}
}
__asm__("");
gettimeofday (&end, 0);
__asm__("");
// END MEASUREMENT
// save result
repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
// sanity check
ksRewind (ks);
while ((key = ksNext (ks)))
{
e.key = (char *) keyName (key);
if (!hsearch_r (e, FIND, &ep, htab))
{
printExit ("Sanity Check Failed: hsearch can not find element");
}
}
hdestroy_r (htab);
elektraFree (htab);
}
// sort repeats
qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
return repeats[numberOfRepeats / 2]; // take median
}
void benchmarkHsearchBuildTime (char * name)
{
const size_t startN = 50;
const size_t stepN = 500;
const size_t endN = 20000;
const size_t ksPerN = 5;
const size_t numberOfRepeats = 7;
const size_t maxStrikes = 10;
const size_t strikeLimit = 10000;
const size_t numberOfLoads = 4;
double * loads = elektraMalloc (sizeof (double) * numberOfLoads);
if (!loads)
{
printExit ("malloc");
}
loads[0] = 1;
loads[1] = 0.75;
loads[2] = 0.5;
loads[3] = 0.25;
// check config
if (startN >= endN || startN == 0)
{
printExit ("startN >= endN || startN == 0");
}
if (numberOfRepeats % 2 == 0)
{
printExit ("numberOfRepeats is even");
}
if (ksPerN % 2 == 0)
{
printExit ("ksPerN is even");
}
// calculate counts
size_t nCount = 0;
for (size_t nI = startN; nI <= endN; nI += stepN)
{
++nCount;
}
// memory allocation and initialization
// init results
size_t * results = elektraMalloc (nCount * ksPerN * numberOfLoads * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
// init repeats
size_t * repeats = elektraMalloc (numberOfRepeats * sizeof (size_t));
if (!repeats)
{
printExit ("malloc");
}
// init KeySetStorage
KeySet ** keySetStorage = elektraMalloc (ksPerN * sizeof (KeySet *));
if (!keySetStorage)
{
printExit ("malloc");
}
// get KeySet shapes
KeySetShape * keySetShapes = getKeySetShapes ();
printf ("Run Benchmark %s:\n", name);
// for all KeySet shapes except 6
for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
{
if (shapeI == 6)
{
continue;
}
KeySetShape * usedKeySetShape = &keySetShapes[shapeI];
size_t strikes = 0;
// for all Ns
for (size_t nI = startN; nI <= endN; nI += stepN)
{
printf ("now at: shape = %zu/%zu n = %zu/%zu\r", shapeI + 1, numberOfShapes, nI, endN);
fflush (stdout);
// generate KeySets
int32_t genSeed;
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
keySetStorage[ksI] = generateKeySet (nI, &genSeed, usedKeySetShape);
}
// for all loads
for (size_t loadI = 0; loadI < numberOfLoads; ++loadI)
{
// for all KeySets in the storage
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
// measure
size_t res = benchmarkHsearchBuildTimeMeasure (keySetStorage[ksI], nI, loads[loadI], repeats,
numberOfRepeats);
// strike policy
if (res > strikeLimit)
{
++strikes;
if (strikes >= maxStrikes)
{
ksI = ksPerN;
loadI = numberOfLoads;
nI = endN + 1;
printf ("shape %zu is out!\n", shapeI);
}
}
else
{
strikes = 0;
// save only non strike values
results[((nI - startN) / stepN) * ksPerN * numberOfLoads + ksI * numberOfLoads + loadI] =
res;
}
}
}
// free ks
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
ksDel (keySetStorage[ksI]);
}
}
// write out
FILE * out = openOutFileWithRPartitePostfix ("benchmark_hsearch_build_time", shapeI);
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "n;ks;load;time\n");
// print data
for (size_t nI = startN; nI <= endN; nI += stepN)
{
for (size_t ksI = 0; ksI < ksPerN; ++ksI)
{
for (size_t loadI = 0; loadI < numberOfLoads; ++loadI)
{
fprintf (out, "%zu;%zu;%f;%zu\n", nI, ksI, loads[loadI],
results[((nI - startN) / stepN) * ksPerN * numberOfLoads + ksI * numberOfLoads + loadI]);
}
}
}
fclose (out);
}
printf ("\n");
elektraFree (repeats);
elektraFree (keySetStorage);
elektraFree (loads);
elektraFree (keySetShapes);
elektraFree (results);
}
#endif
/**
* END =================================================== hsearch Build Time ========================================================== END
*/
/**
* START =================================================== Prediction Time ========================================================= START
*
* This benchmark measures the time of `numberOfSequences` lookup sequences, with the modified branch predictor and with the binary search.
* All KeySet shapes except 6 are used.
* KeySet shape 6 is excluded, because previous evaluations had shown that the results with that shape
* were unusable, due to the unnaturally long key names.
* For each `n`, `patternsPerN` lookup patterns of length `numberOfSequences` are created.
* The KeySet shapes rotate through the lookup patterns.
* Two pattern entries use one seed (31 bit); this works because the maximum n is 10000 and
* log_2 (opmphmPredictorWorthOpmphm (10000) * 2) < 15 bit
* log_2 (15000 * 2) < 15 bit
*
* The results are written out in the following format:
*
* n;predictiontime;binarysearchtime
*
* The number of needed seeds for this benchmark is: nCount * patternsPerN * (numberOfSequences / 2 + 1 + numberOfSequences)
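* With the values used here (nCount = 35, patternsPerN = 999, numberOfSequences = 66) that is 35 * 999 * 100 = 3496500.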
*/
static void benchmarkPredictionTime (char * name)
{
const size_t numberOfRepeats = 5;
const size_t numberOfSequences = 66;
const size_t patternsPerN = 999;
// create the n array
const size_t nCount = 35;
size_t * n = elektraMalloc (nCount * sizeof (size_t));
if (!n)
{
printExit ("malloc");
}
size_t controlCount = 0;
for (size_t i = 100; i < 1000; i += 100)
{
n[controlCount] = i;
++controlCount;
}
for (size_t i = 1000; i < 5000; i += 200)
{
n[controlCount] = i;
++controlCount;
}
for (size_t i = 5000; i <= 10000; i += 1000)
{
n[controlCount] = i;
++controlCount;
}
// check config
if (controlCount != nCount)
{
printExit ("controlCount != nCount");
}
if (numberOfRepeats % 2 == 0)
{
printExit ("numberOfRepeats is even");
}
if (patternsPerN % (numberOfShapes - 1) == 0)
{
printExit ("not all shapes used equally");
}
// memory allocation and initialization
// init results
size_t * results = elektraMalloc (nCount * patternsPerN * 2 * sizeof (size_t)); // 2 prediction and binary search
if (!results)
{
printExit ("malloc");
}
// init repeats
size_t * repeats = elektraMalloc (numberOfRepeats * sizeof (size_t));
if (!repeats)
{
printExit ("malloc");
}
// init seeds
int32_t * seeds = elektraMalloc (numberOfSequences * sizeof (int32_t));
if (!seeds)
{
printExit ("malloc");
}
// init pattern
size_t * pattern = elektraMalloc (numberOfSequences * sizeof (size_t));
if (!pattern)
{
printExit ("malloc");
}
// get KeySet shapes
KeySetShape * keySetShapes = getKeySetShapes ();
printf ("Run Benchmark %s:\n", name);
// for all n
for (size_t nI = 0; nI < nCount; ++nI)
{
// for all pattern per n
for (size_t pI = 0; pI < patternsPerN; ++pI)
{
printf ("now at: n = %zu/%zu pattern = %zu/%zu \r", nI + 1, nCount, pI + 1, patternsPerN);
fflush (stdout);
// create pattern, always two entries with one seed
for (size_t s = 0; s < numberOfSequences; s += 2)
{
int32_t genSeed = 0;
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
// 15 bit each of the 31 bit seed
size_t sequenceLength1 = (genSeed >> 15) & 0x7FFF;
size_t sequenceLength0 = genSeed & 0x7FFF;
sequenceLength1 = sequenceLength1 % (opmphmPredictorWorthOpmphm (n[nI]) * 2 - 1);
sequenceLength0 = sequenceLength0 % (opmphmPredictorWorthOpmphm (n[nI]) * 2 - 1);
pattern[s + 1] = sequenceLength1 + 1;
pattern[s] = sequenceLength0 + 1;
}
// rotate through all KeySet shapes, except 6
size_t shapeI = pI % (numberOfShapes - 1);
if (shapeI == 6)
{
++shapeI;
}
KeySetShape * usedKeySetShape = &keySetShapes[shapeI];
// generate KeySet
int32_t genSeed = 0;
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
KeySet * ks = generateKeySet (n[nI], &genSeed, usedKeySetShape);
// get seeds for OPMPHM
for (size_t s = 0; s < numberOfSequences; ++s)
{
if (getRandomSeed (&seeds[s]) != &seeds[s]) printExit ("Seed Parsing Error or feed me more seeds");
}
size_t resultPrediction;
size_t resultBinarySearch;
// benchmark prediction
// repeat measurement numberOfRepeats time
for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
{
// preparation for measurement
struct timeval start;
struct timeval end;
Key * keyFound;
// START MEASUREMENT
__asm__("");
gettimeofday (&start, 0);
__asm__("");
// for all sequences
for (size_t s = 0; s < numberOfSequences; ++s)
{
// seed used for key to lookup and OPMPHM
int32_t searchHashSeed = seeds[s];
// set seed to return by elektraRandGetInitSeed () in the lookup, in case of hashing
elektraRandBenchmarkInitSeed = searchHashSeed;
// do the lookups
for (size_t lookups = 0; lookups < pattern[s]; ++lookups)
{
keyFound = ksLookup (ks, ks->array[searchHashSeed % ks->size], KDB_O_NOCASCADING);
if (!keyFound || keyFound != ks->array[searchHashSeed % ks->size])
{
printExit ("Sanity Check Failed: found wrong Key");
}
elektraRand (&searchHashSeed);
}
if (!ks->opmphmPredictor)
{
printExit ("Sanity Check Failed: no predictor used");
}
// simulate data change
ks->flags |= KS_FLAG_NAME_CHANGE;
if (ks->opmphm) opmphmClear (ks->opmphm);
}
__asm__("");
gettimeofday (&end, 0);
__asm__("");
// END MEASUREMENT
// save result
repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
}
// sort repeats
qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
resultPrediction = repeats[numberOfRepeats / 2];
// benchmark binary search
// repeat measurement numberOfRepeats time
for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
{
// preparation for measurement
struct timeval start;
struct timeval end;
Key * keyFound;
// START MEASUREMENT
__asm__("");
gettimeofday (&start, 0);
__asm__("");
// for all sequences
for (size_t s = 0; s < numberOfSequences; ++s)
{
// seed used for key to lookup and OPMPHM
int32_t searchHashSeed = seeds[s];
// do the lookups
for (size_t lookups = 0; lookups < pattern[s]; ++lookups)
{
keyFound = ksLookup (ks, ks->array[searchHashSeed % ks->size],
KDB_O_NOCASCADING | KDB_O_BINSEARCH);
if (!keyFound || keyFound != ks->array[searchHashSeed % ks->size])
{
printExit ("Sanity Check Failed: found wrong Key");
}
elektraRand (&searchHashSeed);
}
}
__asm__("");
gettimeofday (&end, 0);
__asm__("");
// END MEASUREMENT
// save result
repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
}
// sort repeats
qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
resultBinarySearch = repeats[numberOfRepeats / 2];
results[nI * patternsPerN * 2 + pI * 2] = resultPrediction;
results[nI * patternsPerN * 2 + pI * 2 + 1] = resultBinarySearch;
ksDel (ks);
}
}
printf ("\n");
// write out
FILE * out = openOutFileWithRPartitePostfix ("benchmark_prediction_time", opmphmPredictorHistoryMask >> 4); // shift 16 to 8 bit
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "n;predictiontime;binarysearchtime\n");
for (size_t nI = 0; nI < nCount; ++nI)
{
for (size_t pI = 0; pI < patternsPerN; ++pI)
{
size_t predictiontime = results[nI * patternsPerN * 2 + pI * 2];
size_t binarysearchtime = results[nI * patternsPerN * 2 + pI * 2 + 1];
fprintf (out, "%zu;%zu;%zu\n", n[nI], predictiontime, binarysearchtime);
}
}
fclose (out);
elektraFree (n);
elektraFree (keySetShapes);
elektraFree (results);
elektraFree (repeats);
elektraFree (pattern);
elektraFree (seeds);
}
/**
* END ===================================================== Prediction Time =========================================================== END
*/
/**
* START ================================================= Prints all KeySetShapes =================================================== START
*/
static void benchmarkPrintAllKeySetShapes (char * name)
{
printf ("%s\n", name);
const size_t n = 30;
int32_t seed = 47658589;
KeySetShape * keySetShapes = getKeySetShapes ();
for (size_t shapeId = 0; shapeId < numberOfShapes; ++shapeId)
{
int32_t s = seed;
//~ timeInit ();
KeySet * ks = generateKeySet (n, &s, &keySetShapes[shapeId]);
//~ timePrint ("generateKeySet:");
// print KS
if (1)
{
printf (" ======================= shapeId %zu =======================\n\n", shapeId);
Key * key;
ksRewind (ks);
while ((key = ksNext (ks)))
{
printf ("%s\n", keyName (key));
}
printf ("\n ======================== size %zd ========================\n\n", ksGetSize (ks));
}
ksDel (ks);
}
elektraFree (keySetShapes);
}
/**
* END =================================================== Prints all KeySetShapes ===================================================== END
*/
int main (int argc, char ** argv)
{
// define all benchmarks
size_t benchmarksCount = 9;
#ifdef HAVE_HSEARCHR
// hsearchbuildtime
++benchmarksCount;
#endif
Benchmark * benchmarks = elektraMalloc (benchmarksCount * sizeof (Benchmark));
if (!benchmarks)
{
printExit ("malloc");
}
// hashfunctiontime
char * benchmarkNameHashFunctionTime = "hashfunctiontime";
benchmarks[0].name = benchmarkNameHashFunctionTime;
benchmarks[0].benchmarkF = benchmarkHashFunctionTime;
benchmarks[0].numberOfSeedsNeeded = 32;
// mapping
char * benchmarkNameMapping = "mapping";
benchmarks[1].name = benchmarkNameMapping;
benchmarks[1].benchmarkF = benchmarkMapping;
benchmarks[1].numberOfSeedsNeeded = 12400;
// mapping_opt
char * benchmarkNameMappingOpt = "mapping_opt";
benchmarks[2].name = benchmarkNameMappingOpt;
benchmarks[2].benchmarkF = benchmarkMappingOpt;
benchmarks[2].numberOfSeedsNeeded = 93920;
// mapping_allseeds
char * benchmarkNameMappingAllSeeds = "mapping_allseeds";
benchmarks[3].name = benchmarkNameMappingAllSeeds;
benchmarks[3].benchmarkF = benchmarkMappingAllSeeds;
benchmarks[3].numberOfSeedsNeeded = 7;
// printallkeysetshapes
char * benchmarkNamePrintAllKeySetShapes = "printallkeysetshapes";
benchmarks[4].name = benchmarkNamePrintAllKeySetShapes;
benchmarks[4].benchmarkF = benchmarkPrintAllKeySetShapes;
benchmarks[4].numberOfSeedsNeeded = 0;
// opmphmbuildtime
char * benchmarkNameOpmphmBuildTime = "opmphmbuildtime";
benchmarks[5].name = benchmarkNameOpmphmBuildTime;
benchmarks[5].benchmarkF = benchmarkOPMPHMBuildTime;
benchmarks[5].numberOfSeedsNeeded = 1757;
// opmphmsearchtime
char * benchmarkNameOpmphmSearchTime = "opmphmsearchtime";
benchmarks[6].name = benchmarkNameOpmphmSearchTime;
benchmarks[6].benchmarkF = benchmarkOPMPHMSearchTime;
benchmarks[6].numberOfSeedsNeeded = 54600;
// binarysearchtime
char * benchmarkNameBinarySearchTime = "binarysearchtime";
benchmarks[7].name = benchmarkNameBinarySearchTime;
benchmarks[7].benchmarkF = benchmarkBinarySearchTime;
benchmarks[7].numberOfSeedsNeeded = 54600;
// predictiontime
char * benchmarkNamePredictionTime = "predictiontime";
benchmarks[8].name = benchmarkNamePredictionTime;
benchmarks[8].benchmarkF = benchmarkPredictionTime;
benchmarks[8].numberOfSeedsNeeded = 3496500;
#ifdef HAVE_HSEARCHR
// hsearchbuildtime
char * benchmarkNameHsearchBuildTime = "hsearchbuildtime";
benchmarks[benchmarksCount - 1].name = benchmarkNameHsearchBuildTime;
benchmarks[benchmarksCount - 1].benchmarkF = benchmarkHsearchBuildTime;
benchmarks[benchmarksCount - 1].numberOfSeedsNeeded = 1400;
#endif
// run benchmark
if (argc == 1)
{
fprintf (stderr, "Usage: cat <fileWithSeeds> | %s <benchmark>\n", argv[0]);
fprintf (stderr, "\nUse the generate-seeds script to generate <fileWithSeeds>, number of seeds according to:\n\n");
fprintf (stderr, "%-20s %10s\n", "<benchmark>", "seeds");
for (size_t i = 0; i < benchmarksCount; ++i)
{
fprintf (stderr, "%-20s %10zu\n", benchmarks[i].name, benchmarks[i].numberOfSeedsNeeded);
}
elektraFree (benchmarks);
return EXIT_FAILURE;
}
for (size_t i = 0; i < benchmarksCount; ++i)
{
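// prefix match: run the first benchmark whose name starts with argv[1]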
if (!strncmp (benchmarks[i].name, argv[1], strlen (argv[1])))
{
benchmarks[i].benchmarkF (benchmarks[i].name);
elektraFree (benchmarks);
return EXIT_SUCCESS;
}
}
fprintf (stderr, "Error: %s is not a benchmark\n", argv[1]);
fprintf (stderr, "Available benchmarks:\n");
for (size_t i = 0; i < benchmarksCount; ++i)
{
fprintf (stderr, "* %s\n", benchmarks[i].name);
}
elektraFree (benchmarks);
return EXIT_FAILURE;
}
/**
* Benchmark helpers
*/
/**
* @brief Read a seed from STDIN.
*
* @param seed storage for the read in seed
*
* @retval int32_t * on success
* @retval NULL on read or parse error
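*
* Expects one decimal seed per line, e.g. the line "123456789" yields the seed 123456789.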
*/
static int32_t * getRandomSeed (int32_t * seed)
{
// read from stdin
char data[10 + 2]; // min = 0, max = 2^31 - 1, len (2^31 - 1) = 10, plus '\n' and '\0'
if (fgets (data, 12, stdin) != data)
{
return NULL;
}
// eliminate newline
char * c;
for (c = data; *c != '\n' && *c != '\0'; ++c)
;
*c = '\0';
// prevent empty lines
if (strlen (data) == 0)
{
return NULL;
}
// convert to int
char * pEnd;
*seed = strtol (data, &pEnd, 10);
if (*pEnd != '\0')
{
return NULL;
}
return seed;
}
/**
* @brief Opens file with OPMPHMR_PARTITE postfix.
*
* supports OPMPHMTUPLE < 100
*
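* e.g. openOutFileWithRPartitePostfix ("benchmark_opmphm_build_time", 3) opens "benchmark_opmphm_build_time3.csv" for writing.
*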
* @param name name of the file
*
* @retval FILE * on success
* @retval NULL on error
*/
static FILE * openOutFileWithRPartitePostfix (const char * name, uint8_t r)
{
const char * const format = "%u.csv";
char formatData[strlen (name) + strlen (format) + 1];
char filename[strlen (name) + strlen (format) + 1];
strcpy (formatData, name);
strcpy (&formatData[strlen (name)], format);
sprintf (filename, formatData, r);
FILE * out = fopen (filename, "w");
if (!out)
{
return NULL;
}
return out;
}
static const char * getString (void * data)
{
return keyName ((Key *) data);
}
/**
* @brief Power function.
*
* @param p basis
* @param q exponent
*
* @retval size_t p^q
*/
static size_t getPower (size_t p, size_t q)
{
size_t result = 1;
for (size_t t = 0; t < q; ++t)
{
result *= p;
}
return result;
}
/**
* @brief Comparison between integers, suitable as qsort callback.
*
* @param a first integer
* @param b second integer
*
* @retval -1 if a < b
* @retval 0 if a == b
* @retval 1 if a > b
*/
static int cmpInteger (const void * a, const void * b)
{
if (*(size_t *) a < *(size_t *) b)
{
return -1;
}
else if (*(size_t *) a > *(size_t *) b)
{
return 1;
}
else
{
return 0;
}
}
/**
* The Key Set shapes
*/
/**
* every key name is unique and goes 1 level deep
*/
static void shapefConstBinary (const size_t initSize ELEKTRA_UNUSED, size_t size ELEKTRA_UNUSED, size_t level ELEKTRA_UNUSED,
int32_t * seed ELEKTRA_UNUSED, KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
ret->label = 0;
ret->subKeys = 0;
}
/**
* binary tree
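* e.g. with initSize = 30 the levels 0-4 branch with 2 subKeys each and level 5 terminates, since getPower (2, 5) = 32 > 30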
*/
static void shapefBinaryBranch (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
size_t subKeys = 2;
ret->label = 0;
if (getPower (subKeys, level) > initSize)
{
ret->subKeys = 0;
}
else
{
ret->subKeys = subKeys;
}
}
/**
* every parent has n/branchfactor children
*/
static void shapefDynamicBranch (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
size_t branchRatio = 9;
ret->label = 0;
size_t subKeys = (initSize / branchRatio);
if (subKeys < 2)
{
subKeys = 2;
}
if (getPower (subKeys, level) > initSize)
{
ret->subKeys = 0;
}
else
{
ret->subKeys = subKeys;
}
}
/**
* all key names have a common start that is startLevel levels long
*/
static void shapefLateDynamicBranch (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
size_t startLevel = 5;
size_t branchRatio = 9;
ret->label = 0;
if (level < startLevel)
{
ret->subKeys = 1;
return;
}
level -= startLevel;
size_t subKeys = (initSize / branchRatio);
if (subKeys < 2)
{
subKeys = 2;
}
if (getPower (subKeys, level) > initSize)
{
ret->subKeys = 0;
}
else
{
ret->subKeys = subKeys;
}
}
/**
* all key names have a common start and end
*/
static void * shapeCommonStartEndInit (void)
{
uint8_t * data = elektraMalloc (sizeof (uint8_t));
if (!data)
{
return NULL;
}
*data = 0;
return data;
}
static void shapeCommonStartEndDel (void * data)
{
elektraFree (data);
}
static void shapefCommonStartEnd (const size_t initSize ELEKTRA_UNUSED, size_t size, size_t level, int32_t * seed ELEKTRA_UNUSED,
KsShapeFunctionReturn * ret, void * data)
{
size_t notCommonLevel = 4;
size_t maxLevel = 10;
if (level < notCommonLevel)
{
// creates common start
ret->subKeys = 1;
ret->label = 0;
}
else if (notCommonLevel == level)
{
// creates level with different names
ret->subKeys = size + 1;
ret->label = 0;
}
else if (level > notCommonLevel)
{
uint8_t * isLabelSet = data;
if (!*isLabelSet)
{
// creates common end
if (level == notCommonLevel + 1)
{
// set label
ret->label = 1;
ret->subKeys = 1;
}
else if (level == maxLevel)
{
// end of deep key
ret->label = 0;
ret->subKeys = 0;
*isLabelSet = 1;
}
else
{
// create deep key
ret->label = 0;
ret->subKeys = 1;
}
}
else
{
// use common end
ret->subKeys = -1;
ret->label = 1;
}
}
}
/**
* modules: level 1 keys are the same, one level 2 key stores the modules, like system/elektra.
*/
static void * shapeModulesInit (void)
{
// three boolean flags indicating whether the respective label was set; the fourth counts from 1 to 3 for label assignment
void * data = elektraMalloc (4 * sizeof (uint8_t));
if (!data)
{
return NULL;
}
uint8_t * d = data;
d[0] = 0;
d[1] = 0;
d[2] = 0;
d[3] = 1;
return data;
}
static void shapeModulesDel (void * data)
{
elektraFree (data);
}
static void shapefModules (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
KsShapeFunctionReturn * ret, void * data)
{
// label 1 5 subKeys
// label 2 10 subKeys
// label 3 15 subKeys
ssize_t modulesKeys[3] = { 5, 10, 15 };
uint8_t * d = data;
uint8_t * firstSet = &d[0];
uint8_t * secondSet = &d[1];
uint8_t * thirdSet = &d[2];
uint8_t * assign = &d[3];
if (level == 1)
{
// common start, simulates elektra in system/elektra
ret->subKeys = 1;
ret->label = 0;
}
else if (level == 2)
{
// common name, simulates modules in system/elektra/modules
// calculates how many modules have space
ret->subKeys = 0;
ssize_t remainingSize = initSize;
uint8_t isSpace = 1;
uint8_t l = 0;
while (isSpace)
{
if (remainingSize - modulesKeys[l] < 0)
{
isSpace = 0;
}
else
{
remainingSize -= modulesKeys[l];
l = (l + 1) % 3;
++ret->subKeys;
}
}
// add solo keys
ret->subKeys += remainingSize;
ret->label = 0;
}
else if (level == 3)
{
// give each modules ret->subKeys * 5 subKeys
if (!*firstSet)
{
ret->subKeys = 1;
ret->label = 1;
*firstSet = 1;
}
else if (!*secondSet)
{
ret->subKeys = 2;
ret->label = 2;
*secondSet = 1;
}
else if (!*thirdSet)
{
ret->subKeys = 3;
ret->label = 3;
*thirdSet = 1;
}
else
{
// assign
ret->subKeys = -1;
ret->label = *assign;
*assign = (*assign % 3) + 1;
}
}
else if (level == 4)
{
// the 5 in ret->subKeys * 5
ret->subKeys = 5;
ret->label = 0;
}
else
{
// terminate keys
ret->subKeys = 0;
ret->label = 0;
}
}
/**
* always wider, subKeys are incremented by one every level
*/
static void shapefWide (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
ret->label = 0;
size_t startSubKeys = 2;
// determine to which level it is possible to go
size_t l = 0; // level 0 should have 2 subs
size_t keysOnLevel = startSubKeys;
while (keysOnLevel <= initSize)
{
++l;
keysOnLevel *= startSubKeys + l;
}
if (level < l)
{
ret->subKeys = startSubKeys + level;
}
else
{
ret->subKeys = 0;
}
}
/**
* always tighter, subKeys are decremented by one every level till two is reached
*/
static void shapefTight (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
ret->label = 0;
size_t startSubKeys = 2;
// determine to which level it is possible to go
size_t l = 0; // level 0 should have 2 subs
size_t keysOnLevel = startSubKeys;
while (keysOnLevel <= initSize)
{
++l;
keysOnLevel *= startSubKeys + l;
}
if (level < l)
{
ret->subKeys = startSubKeys + l - level - 1;
}
else
{
ret->subKeys = 0;
}
}
/**
* @brief Set the shape functions and parameters together to get the KeySetShape population.
*
* @retval KeySetShape * on success
*/
static KeySetShape * getKeySetShapes (void)
{
KeySetShape * out = elektraMalloc (sizeof (KeySetShape) * numberOfShapes);
if (!out) printExit ("malloc KeySetShapes");
size_t shapeCount = 0;
// shapefConstBinary
out[shapeCount].minWordLength = 1;
out[shapeCount].maxWordLength = 21;
out[shapeCount].special = 127;
out[shapeCount].parent = 0;
out[shapeCount].shapeInit = NULL;
out[shapeCount].shapef = shapefConstBinary;
out[shapeCount].shapeDel = NULL;
++shapeCount;
// shapefBinaryBranch
out[shapeCount].minWordLength = 1;
out[shapeCount].maxWordLength = 1;
out[shapeCount].special = 50;
out[shapeCount].parent = 7;
out[shapeCount].shapeInit = NULL;
out[shapeCount].shapef = shapefBinaryBranch;
out[shapeCount].shapeDel = NULL;
++shapeCount;
// shapefDynamicBranch
out[shapeCount].minWordLength = 1;
out[shapeCount].maxWordLength = 11;
out[shapeCount].special = 50;
out[shapeCount].parent = 7;
out[shapeCount].shapeInit = NULL;
out[shapeCount].shapef = shapefDynamicBranch;
out[shapeCount].shapeDel = NULL;
++shapeCount;
// shapefLateDynamicBranch
out[shapeCount].minWordLength = 1;
out[shapeCount].maxWordLength = 11;
out[shapeCount].special = 50;
out[shapeCount].parent = 7;
out[shapeCount].shapeInit = NULL;
out[shapeCount].shapef = shapefLateDynamicBranch;
out[shapeCount].shapeDel = NULL;
++shapeCount;
// shapefWide
out[shapeCount].minWordLength = 1;
out[shapeCount].maxWordLength = 11;
out[shapeCount].special = 50;
out[shapeCount].parent = 7;
out[shapeCount].shapeInit = NULL;
out[shapeCount].shapef = shapefWide;
out[shapeCount].shapeDel = NULL;
++shapeCount;
// shapefTight
out[shapeCount].minWordLength = 1;
out[shapeCount].maxWordLength = 11;
out[shapeCount].special = 50;
out[shapeCount].parent = 7;
out[shapeCount].shapeInit = NULL;
out[shapeCount].shapef = shapefTight;
out[shapeCount].shapeDel = NULL;
++shapeCount;
// shapefCommonStartEnd
out[shapeCount].minWordLength = 1;
out[shapeCount].maxWordLength = 21;
out[shapeCount].special = 50;
out[shapeCount].parent = 0;
out[shapeCount].shapeInit = shapeCommonStartEndInit;
out[shapeCount].shapef = shapefCommonStartEnd;
out[shapeCount].shapeDel = shapeCommonStartEndDel;
++shapeCount;
// shapefModules
out[shapeCount].minWordLength = 1;
out[shapeCount].maxWordLength = 11;
out[shapeCount].special = 50;
out[shapeCount].parent = 7;
out[shapeCount].shapeInit = shapeModulesInit;
out[shapeCount].shapef = shapefModules;
out[shapeCount].shapeDel = shapeModulesDel;
++shapeCount;
if (shapeCount != numberOfShapes) printExit ("shapeCount != numberOfShapes");
return out;
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/gem.h"
#include "magick/gem-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the RGBTransformImage method is:
%
% MagickBooleanType RGBTransformImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
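/*
  A minimal usage sketch (an assumption for illustration: `image` is a valid
  Image acquired elsewhere, and error handling is reduced to a status check):

    if (RGBTransformImage(image,YCbCrColorspace) == MagickFalse)
      (void) fprintf(stderr,"RGB transform failed\n");
*/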
static inline void ConvertRGBToCMY(const Quantum red,const Quantum green,
const Quantum blue,double *cyan,double *magenta,double *yellow)
{
*cyan=QuantumScale*(QuantumRange-red);
*magenta=QuantumScale*(QuantumRange-green);
*yellow=QuantumScale*(QuantumRange-blue);
}
static void ConvertRGBToLab(const Quantum red,const Quantum green,
const Quantum blue,double *L,double *a,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLab(X,Y,Z,L,a,b);
}
static inline void ConvertXYZToLMS(const double x,const double y,
const double z,double *L,double *M,double *S)
{
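/* note: these coefficients correspond to the CAT02 chromatic adaptation matrix */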
*L=0.7328*x+0.4296*y-0.1624*z;
*M=(-0.7036*x+1.6975*y+0.0061*z);
*S=0.0030*x+0.0136*y+0.9834*z;
}
static void ConvertRGBToLMS(const Quantum red,const Quantum green,
const Quantum blue,double *L,double *M,double *S)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLMS(X,Y,Z,L,M,S);
}
static void ConvertRGBToLuv(const Quantum red,const Quantum green,
const Quantum blue,double *L,double *u,double *v)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLuv(X,Y,Z,L,u,v);
}
static void ConvertRGBToxyY(const Quantum red,const Quantum green,
const Quantum blue,double *low_x,double *low_y,double *cap_Y)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
*low_x=X/(X+Y+Z);
*low_y=Y/(X+Y+Z);
*cap_Y=Y;
}
static void ConvertRGBToYPbPr(const Quantum red,const Quantum green,
const Quantum blue,double *Y,double *Pb,double *Pr)
{
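/* the luma weights 0.298839/0.586811/0.114350 follow ITU-R BT.601 */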
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
*Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}
static void ConvertRGBToYCbCr(const Quantum red,const Quantum green,
const Quantum blue,double *Y,double *Cb,double *Cr)
{
ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const Quantum red,const Quantum green,
const Quantum blue,double *Y,double *U,double *V)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
*V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}
static void ConvertRGBToYDbDr(const Quantum red,const Quantum green,
const Quantum blue,double *Y,double *Db,double *Dr)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
*Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}
static void ConvertRGBToYIQ(const Quantum red,const Quantum green,
const Quantum blue,double *Y,double *I,double *Q)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
*Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}
MagickExport MagickBooleanType RGBTransformImage(Image *image,
const ColorspaceType colorspace)
{
#define RGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
register ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
pixel.red=(MagickRealType) pixel.red;
pixel.green=(MagickRealType) pixel.green;
pixel.blue=(MagickRealType) pixel.blue;
ConvertRGBToCMYK(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q)));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
/*
Transform image from sRGB to the selected colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
X,
Y,
Z;
Quantum
blue,
green,
red;
red=ClampToQuantum((MagickRealType) GetPixelRed(q));
green=ClampToQuantum((MagickRealType) GetPixelGreen(q));
blue=ClampToQuantum((MagickRealType) GetPixelBlue(q));
switch (colorspace)
{
case CMYColorspace:
{
ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
break;
}
case HCLColorspace:
{
ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
break;
}
case HCLpColorspace:
{
ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
break;
}
case HSIColorspace:
{
ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
break;
}
case LabColorspace:
{
ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
break;
}
case LCHuvColorspace:
{
ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
break;
}
case LMSColorspace:
{
ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
break;
}
case LuvColorspace:
{
ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
break;
}
case xyYColorspace:
{
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
break;
}
case XYZColorspace:
{
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
break;
}
case YCbCrColorspace:
{
ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
break;
}
case YDbDrColorspace:
{
ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
break;
}
case YIQColorspace:
{
ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
break;
}
case YPbPrColorspace:
{
ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
break;
}
case YUVColorspace:
{
ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
break;
}
default:
{
X=QuantumScale*red;
Y=QuantumScale*green;
Z=QuantumScale*blue;
break;
}
}
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
film_gamma))/1024.0));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,logmap[ScaleQuantumToMap(red)]);
SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]);
SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) ResetMagickMemory(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333*(double) i);
y_map[i].x=(MagickRealType) (0.33334*(double) i);
z_map[i].x=(MagickRealType) (0.33333*(double) i);
x_map[i].y=(MagickRealType) (0.50000*(double) i);
y_map[i].y=(MagickRealType) (0.00000*(double) i);
z_map[i].y=(MagickRealType) (-0.50000*(double) i);
x_map[i].z=(MagickRealType) (-0.25000*(double) i);
y_map[i].z=(MagickRealType) (0.50000*(double) i);
z_map[i].z=(MagickRealType) (-0.25000*(double) i);
}
break;
}
case Rec601LumaColorspace:
{
/*
Initialize Rec601 luma tables:
G = 0.298839*R+0.586811*G+0.114350*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
x_map[i].y=(MagickRealType) (0.298839*(double) i);
y_map[i].y=(MagickRealType) (0.586811*(double) i);
z_map[i].y=(MagickRealType) (0.114350*(double) i);
x_map[i].z=(MagickRealType) (0.298839*(double) i);
y_map[i].z=(MagickRealType) (0.586811*(double) i);
z_map[i].z=(MagickRealType) (0.114350*(double) i);
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.2988390*R+0.5868110*G+0.1143500*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
y_map[i].y=(MagickRealType) (-0.331264*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].z=(MagickRealType) (-0.418688*(double) i);
z_map[i].z=(MagickRealType) (-0.081312*(double) i);
}
break;
}
case Rec709LumaColorspace:
{
/*
Initialize Rec709 luma tables:
G = 0.212656*R+0.715158*G+0.072186*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
x_map[i].y=(MagickRealType) (0.212656*(double) i);
y_map[i].y=(MagickRealType) (0.715158*(double) i);
z_map[i].y=(MagickRealType) (0.072186*(double) i);
x_map[i].z=(MagickRealType) (0.212656*(double) i);
y_map[i].z=(MagickRealType) (0.715158*(double) i);
z_map[i].z=(MagickRealType) (0.072186*(double) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212656*R+0.715158*G+0.072186*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
x_map[i].y=(MagickRealType) (-0.114572*(double) i);
y_map[i].y=(MagickRealType) (-0.385428*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].z=(MagickRealType) (-0.454153*(double) i);
z_map[i].z=(MagickRealType) (-0.045847*(double) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839*R+0.586811*G+0.114350*B
C1= -0.298839*R-0.586811*G+0.88600*B
C2= 0.70100*R-0.586811*G-0.114350*B
YCC is scaled by 1.3584. The C1 zero point is at 156 and the C2 zero
point is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.003962014134275617*i;
y_map[i].x=0.007778268551236748*i;
z_map[i].x=0.001510600706713781*i;
x_map[i].y=(-0.002426619775463276)*i;
y_map[i].y=(-0.004763965913702149)*i;
z_map[i].y=0.007190585689165425*i;
x_map[i].z=0.006927257754597858*i;
y_map[i].z=(-0.005800713697502058)*i;
z_map[i].z=(-0.0011265440570958)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.2201118963486454*(1.099*i-0.099);
y_map[i].x=0.4321260306242638*(1.099*i-0.099);
z_map[i].x=0.08392226148409894*(1.099*i-0.099);
x_map[i].y=(-0.1348122097479598)*(1.099*i-0.099);
y_map[i].y=(-0.2646647729834528)*(1.099*i-0.099);
z_map[i].y=0.3994769827314126*(1.099*i-0.099);
x_map[i].z=0.3848476530332144*(1.099*i-0.099);
y_map[i].z=(-0.3222618720834477)*(1.099*i-0.099);
z_map[i].z=(-0.06258578094976668)*(1.099*i-0.099);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*restrict q;
register size_t
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelRed(q)));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelGreen(q)));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelBlue(q)));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
(MagickRealType) primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
(MagickRealType) primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
(MagickRealType) primary_info.z;
SetPixelRed(q,ScaleMapToQuantum(pixel.red));
SetPixelGreen(q,ScaleMapToQuantum(pixel.green));
SetPixelBlue(q,ScaleMapToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RGBTransformImage)
#endif
proceed=SetImageProgress(image,RGBTransformImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
register size_t
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=ScaleMapToQuantum(pixel.red);
image->colormap[i].green=ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
const ColorspaceType colorspace)
{
ImageType
type;
MagickBooleanType
status;
if (image->colorspace == colorspace)
return(MagickTrue);
image->colorspace=colorspace;
image->rendering_intent=UndefinedIntent;
image->gamma=1.000/2.200;
(void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
type=image->type;
if (IsGrayColorspace(colorspace) != MagickFalse)
{
if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
(image->intensity == Rec709LuminancePixelIntensityMethod))
image->gamma=1.0;
type=GrayscaleType;
}
else
if ((IsRGBColorspace(colorspace) != MagickFalse) ||
(colorspace == XYZColorspace) || (colorspace == xyYColorspace))
image->gamma=1.0;
else
{
image->rendering_intent=PerceptualIntent;
image->chromaticity.red_primary.x=0.6400;
image->chromaticity.red_primary.y=0.3300;
image->chromaticity.red_primary.z=0.0300;
image->chromaticity.green_primary.x=0.3000;
image->chromaticity.green_primary.y=0.6000;
image->chromaticity.green_primary.z=0.1000;
image->chromaticity.blue_primary.x=0.1500;
image->chromaticity.blue_primary.y=0.0600;
image->chromaticity.blue_primary.z=0.7900;
image->chromaticity.white_point.x=0.3127;
image->chromaticity.white_point.y=0.3290;
image->chromaticity.white_point.z=0.3583;
}
status=SyncImagePixelCache(image,&image->exception);
image->type=type;
return(status);
}
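/*
Editorial usage sketch (not part of the original source): SetImageColorspace()
only retags the image and resets its gamma, rendering intent, and
chromaticity metadata; it never touches pixel values. To convert the pixels
themselves, call TransformImageColorspace():

(void) SetImageColorspace(image,YCbCrColorspace);        <- retag only
(void) TransformImageColorspace(image,YCbCrColorspace);  <- convert pixels
*/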
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
const ColorspaceType colorspace)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == colorspace)
return(MagickTrue);
if ((image->colorspace == Rec709LumaColorspace) &&
(colorspace == sRGBColorspace))
return(MagickTrue);
if ((image->colorspace == GRAYColorspace) && (image->gamma != 1.0) &&
(colorspace == sRGBColorspace))
return(MagickTrue);
if (colorspace == UndefinedColorspace)
return(SetImageColorspace(image,colorspace));
/*
Convert the reference image from an alternate colorspace to sRGB.
*/
(void) DeleteImageProfile(image,"icc");
(void) DeleteImageProfile(image,"icm");
if (IssRGBColorspace(colorspace) != MagickFalse)
return(TransformRGBImage(image,image->colorspace));
status=MagickTrue;
if (IssRGBColorspace(image->colorspace) == MagickFalse)
status=TransformRGBImage(image,image->colorspace);
if (status == MagickFalse)
return(status);
/*
Convert the reference image from sRGB to an alternate colorspace.
*/
if (RGBTransformImage(image,colorspace) == MagickFalse)
status=MagickFalse;
return(status);
}
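/*
Editorial usage sketch (image acquisition shown with the standard MagickCore
ReadImage() call; error handling is elided):

Image *image=ReadImage(image_info,exception);
if (TransformImageColorspace(image,LabColorspace) == MagickFalse)
{ handle the error }
... operate on the L*a*b* pixels ...
(void) TransformImageColorspace(image,sRGBColorspace);  back to sRGB
*/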
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the TransformRGBImage method is:
%
% MagickBooleanType TransformRGBImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
*red=ClampToQuantum(QuantumRange*(1.0-cyan));
*green=ClampToQuantum(QuantumRange*(1.0-magenta));
*blue=ClampToQuantum(QuantumRange*(1.0-yellow));
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
double *X,double *Y,double *Z)
{
*X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
*Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
*Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}
static inline void ConvertLMSToRGB(const double L,const double M,
const double S,Quantum *red,Quantum *green,Quantum *blue)
{
double
X,
Y,
Z;
ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
const double v,Quantum *red,Quantum *green,Quantum *blue)
{
double
X,
Y,
Z;
ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline ssize_t RoundToYCC(const MagickRealType value)
{
if (value <= 0.0)
return(0);
if (value >= 1388.0)
return(1388);
return((ssize_t) (value+0.5));
}
static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel)
{
pixel->red=((QuantumRange-(QuantumScale*pixel->red*
(QuantumRange-pixel->index)+pixel->index)));
pixel->green=((QuantumRange-(QuantumScale*pixel->green*
(QuantumRange-pixel->index)+pixel->index)));
pixel->blue=((QuantumRange-(QuantumScale*pixel->blue*
(QuantumRange-pixel->index)+pixel->index)));
}
static inline void ConvertLabToRGB(const double L,const double a,
const double b,Quantum *red,Quantum *green,Quantum *blue)
{
double
X,
Y,
Z;
ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
const double cap_Y,Quantum *red,Quantum *green,Quantum *blue)
{
double
X,
Y,
Z;
X=cap_Y/low_y*low_x;
Y=cap_Y;
Z=cap_Y/low_y*(1.0-low_x-low_y);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
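/*
Editorial note: ConvertxyYToRGB() divides by low_y, so a pixel with y == 0
produces Inf/NaN before clamping. A guarded variant might look like the
sketch below; using MagickEpsilon as the floor is an assumption, not part
of the original code.
*/
static inline void ConvertxyYToRGBSafe(const double low_x,const double low_y,
const double cap_Y,Quantum *red,Quantum *green,Quantum *blue)
{
double
denominator,
X,
Y,
Z;
/* floor the divisor so a zero chromaticity cannot blow up (assumption) */
denominator=fabs(low_y) < MagickEpsilon ? MagickEpsilon : low_y;
X=cap_Y/denominator*low_x;
Y=cap_Y;
Z=cap_Y/denominator*(1.0-low_x-low_y);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}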
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
Quantum *red,Quantum *green,Quantum *blue)
{
*red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5)));
*green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5)));
*blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5)));
}
static void ConvertYCbCrToRGB(const double Y,const double Cb,
const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
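/*
Editorial note: YCbCr is handled here as full range (no 16..235 studio
swing), so decoding coincides with YPbPr and the wrapper above simply
forwards:

ConvertYCbCrToRGB(Y,Cb,Cr,&red,&green,&blue);  same as ConvertYPbPrToRGB()
*/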
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
Quantum *red,Quantum *green,Quantum *blue)
{
*red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
0.52591263066186533*(Dr-0.5)));
*green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
0.26789932820759876*(Dr-0.5)));
*blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
7.9202543533108e-05*(Dr-0.5)));
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
Quantum *red,Quantum *green,Quantum *blue)
{
*red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+
0.6210244164652610754*(Q-0.5)));
*green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)-
0.6473805968256950427*(Q-0.5)));
*blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+
1.7046149983646481374*(Q-0.5)));
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
Quantum *red,Quantum *green,Quantum *blue)
{
*red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+
1.1398279671717170825*(V-0.5)));
*green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)-
0.5805003156565656797*(V-0.5)));
*blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)-
4.813762626262513e-04*(V-0.5)));
}
MagickExport MagickBooleanType TransformRGBImage(Image *image,
const ColorspaceType colorspace)
{
#define TransformRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
case Rec601LumaColorspace:
case Rec709LumaColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=(MagickRealType) GetPixelGray(q);
if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
(image->intensity == Rec709LuminancePixelIntensityMethod))
gray=EncodePixelGamma(gray);
SetPixelRed(q,ClampToQuantum(gray));
SetPixelGreen(q,ClampToQuantum(gray));
SetPixelBlue(q,ClampToQuantum(gray));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
/*
Transform image from source colorspace to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
X,
Y,
Z;
Quantum
blue,
green,
red;
X=QuantumScale*GetPixelRed(q);
Y=QuantumScale*GetPixelGreen(q);
Z=QuantumScale*GetPixelBlue(q);
switch (colorspace)
{
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=ClampToQuantum((MagickRealType) (QuantumRange*X));
green=ClampToQuantum((MagickRealType) (QuantumRange*Y));
blue=ClampToQuantum((MagickRealType) (QuantumRange*Z));
break;
}
}
SetPixelRed(q,ClampToQuantum((MagickRealType) red));
SetPixelGreen(q,ClampToQuantum((MagickRealType) green));
SetPixelBlue(q,ClampToQuantum((MagickRealType) blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/
film_gamma)-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelRed(q))]));
green=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelGreen(q))]));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelBlue(q))]));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q)));
green=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I2 and I3, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(1.0*(double) i);
y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(1.0*(double) i);
y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(1.0*(double) i);
y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. The C1 zero point is at 156 and the C2 zero
point is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) (0.0000000);
z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) (0.0000000);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) \
magick_threads(image,image,1,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(q));
green=ScaleQuantumToMap(GetPixelGreen(q));
blue=ScaleQuantumToMap(GetPixelBlue(q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransformRGBImage)
#endif
proceed=SetImageProgress(image,TransformRGBImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(image->colormap[i].red);
green=ScaleQuantumToMap(image->colormap[i].green);
blue=ScaleQuantumToMap(image->colormap[i].blue);
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=ClampToQuantum(pixel.red);
image->colormap[i].green=ClampToQuantum(pixel.green);
image->colormap[i].blue=ClampToQuantum(pixel.blue);
}
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
GB_unaryop__identity_fp64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_uint64
// op(A') function: GB_tran__identity_fp64_uint64
// C type: double
// A type: uint64_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
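// For clarity, a hand expansion of GB_CAST_OP (pC, pA) with the macros
// above (an editorial sketch; the generated kernel uses the macro form):
//
//      uint64_t aij = Ax [pA] ;        // GB_GETA
//      double x = (double) aij ;       // GB_CASTING
//      Cx [pC] = x ;                   // GB_OP stored via GB_CX (pC)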
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp64_uint64
(
double *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
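// Editorial usage sketch (array contents are hypothetical):
//
//      uint64_t Ax [4] = { 0, 1, 2, 3 } ;
//      double Cx [4] ;
//      GrB_Info info = GB_unop__identity_fp64_uint64 (Cx, Ax, 4, 1) ;
//      // on success, Cx [i] == (double) Ax [i] for each i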
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
libtorch_utils.h | #ifndef libtorch_UTILS
#define libtorch_UTILS
/*
Copyright (c) 2019, Sanaxen
All rights reserved.
Use of this source code is governed by a MIT license that can be found
in the LICENSE file.
*/
#include <random>
#include "util/utils.h"
#ifdef USE_IMAGE_UTIL
#include "util/Image.hpp"
#endif
namespace cpp_torch
{
inline void nop() {
// do nothing
}
/**
* error exception class
**/
class error_exception : public std::exception {
public:
explicit error_exception(const std::string &msg) : msg_(msg) {
fprintf(stderr, "ERROR:%s\n", msg.c_str());
fflush(stderr);
}
const char *what() const throw() override { return msg_.c_str(); }
private:
std::string msg_;
};
inline size_t tensor_flatten_size(torch::Tensor& t)
{
size_t s = 1;
if (t.dim())
{
for (int i = 0; i < t.sizes().size(); i++)
{
s *= t.sizes()[i];
}
return s;
}
return 0;
}
inline void dump_dim(const std::string & s, torch::Tensor& t)
{
printf("%s dim:%d ", s.c_str(), (int) t.dim());
if (t.dim())
{
for (int i = 0; i < (int) t.sizes().size() - 1; i++)
{
printf("%d x", (int) t.sizes()[i]);
}
printf("%d\n", (int) t.sizes()[t.sizes().size() - 1]);
}
fflush(stdout);
}
inline void dump_dim(const char* s, torch::Tensor& t)
{
dump_dim(std::string(s), t);
}
inline void label2vec(const std::vector<tiny_dnn::label_t>& labels, std::vector<tiny_dnn::vec_t>& vec, int max_label)
{
vec.clear();
vec.resize(labels.size());
#pragma omp parallel for
for (int i = 0; i < labels.size(); i++)
{
tiny_dnn::vec_t t(max_label, 0);
t[labels[i]] = 1.0;
vec[i] = t;
}
}
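// Editorial usage sketch (labels and max_label are hypothetical):
//
// std::vector<tiny_dnn::label_t> labels = { 2, 0, 1 };
// std::vector<tiny_dnn::vec_t> onehot;
// label2vec(labels, onehot, 3);
// // onehot[0] == { 0, 0, 1 }, onehot[1] == { 1, 0, 0 }, onehot[2] == { 0, 1, 0 }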
template <
typename initial_vector>
inline void toTorchTensors(initial_vector& vec, std::vector<torch::Tensor>& tensor_vect)
{
tensor_vect.resize(vec.size());
#pragma omp parallel for
for (int i = 0; i < vec.size(); i++)
{
//torch::Tensor tensor = torch::tensor({ vec[i] }); // form used before libtorch 1.3
torch::Tensor tensor = torch::tensor(vec[i]);
tensor_vect[i] = tensor;
}
}
template <
typename initial_vector>
inline torch::Tensor toTorchTensors(initial_vector& vec)
{
//return torch::tensor({ vec }); 1.3
return torch::tensor( vec );
}
inline std::vector<tiny_dnn::tensor_t> toTensor_t(torch::Tensor& x, int batch, int channel, int h, int w)
{
std::vector<tiny_dnn::tensor_t> y;
const int size = channel * h*w;
torch::Tensor xx = x.view({ batch, 1,1, size });
y.resize(batch);
#pragma omp parallel for
for (int i = 0; i < batch; i++)
{
tiny_dnn::tensor_t t;
tiny_dnn::vec_t v(size);
for (int j = 0; j < size; j++)
{
v[j] = xx[i][0][0][j].template item<float_t>();
}
t.push_back(v);
y[i] = t;
}
return y;
}
inline tiny_dnn::tensor_t toTensor_t(torch::Tensor& x, int channel, int h, int w)
{
const int size = channel * h*w;
torch::Tensor xx = x.view({ 1, 1,1, size });
tiny_dnn::tensor_t t;
tiny_dnn::vec_t v(size);
#if 0
#pragma omp parallel for
for (int j = 0; j < size; j++)
{
v[j] = xx[0][0][0][j].template item<float_t>();
}
#else
// keep the CPU copy alive while reading from its buffer
torch::Tensor xx_cpu = xx.cpu();
const float* p = xx_cpu.data_ptr<float>();
v.assign(p, p + size);
#endif
t.push_back(v);
return t;
}
inline tiny_dnn::vec_t toTensor_t(torch::Tensor& x, int size)
{
torch::Tensor xx = x.view({ 1, 1,1, size });
tiny_dnn::vec_t v(size);
#if 0
#pragma omp parallel for
for (int j = 0; j < size; j++)
{
v[j] = xx[0][0][0][j].template item<float_t>();
}
#else
// keep the CPU copy alive while reading from its buffer
torch::Tensor xx_cpu = xx.cpu();
const float_t* p = xx_cpu.data_ptr<float_t>();
v.assign(p, p + size);
#endif
return v;
}
inline int get_BATCH(const std::vector<torch::Tensor>& images, torch::Tensor& batch_images, const int batchSize, std::vector<int>& index)
{
int batchNum = images.size() / batchSize;
if (batchNum == 0)
{
printf("input size < batch size.\n"); fflush(stdout);
throw error_exception("input size < batch size");
}
batch_images = images[index[0]];
for (int i = 1; i < index.size(); i++)
{
batch_images = torch::cat({ batch_images, images[index[i]] }, 0);
}
return batchNum;
}
void TensorToImageFile(torch::Tensor image_tensor, const std::string& filename, const float scale = 1.0f)
{
#ifdef USE_IMAGE_UTIL
const int channels = image_tensor.sizes()[0];
const int h = image_tensor.sizes()[1];
const int w = image_tensor.sizes()[2];
if (channels == 0 || channels > 3)
{
dump_dim("image_tensor", image_tensor);
throw error_exception("tensor dimension != CxHxW");
}
torch::Tensor img_view = image_tensor.view({ channels, h, w });
tiny_dnn::tensor_t img = toTensor_t(img_view, channels, h, w);
const int sz = img[0].size();
#pragma omp parallel for
for (int i = 0; i < sz; i++)
{
img[0][i] *= scale;
}
Image rgb_img = vec_t2image(img[0], channels, h, w);
ImageWrite(filename.c_str(), &rgb_img);
#else
throw error_exception("undefined USE_IMAGE_UTIL");
#endif
}
inline int get_BATCH(const std::vector<torch::Tensor>& images, const std::vector<torch::Tensor>& labels, torch::Tensor& batch_images, torch::Tensor& batch_labels, const int batchSize, std::vector<int>& index)
{
int batchNum = images.size() / batchSize;
if (batchNum == 0)
{
printf("input size < batch size.\n"); fflush(stdout);
throw error_exception("input size < batch size");
}
batch_images = images[index[0]];
batch_labels = labels[index[0]];
for (int i = 1; i < index.size(); i++)
{
batch_images = torch::cat({ batch_images, images[index[i]] }, 0);
batch_labels = torch::cat({ batch_labels, labels[index[i]] }, 0);
}
return batchNum;
}
inline void optimizer_lr_chg(std::string& optimizer_name, torch::optim::Optimizer* optimizer, float alp)
{
if (optimizer_name == "adam")
{
auto& opt = static_cast<torch::optim::AdamOptions&>(optimizer->param_groups()[0].options());
if (opt.lr() < 1.0e-5) return;
printf("\nlr:%.10f ->", opt.lr());
opt.lr(opt.lr()*alp);
printf(" %.10f\n", opt.lr());
}
if (optimizer_name == "sgd")
{
auto& opt = static_cast<torch::optim::SGDOptions&>(optimizer->param_groups()[0].options());
if (opt.lr() < 1.0e-5) return;
opt.lr(opt.lr()*alp);
}
if (optimizer_name == "rmsprop")
{
auto& opt = static_cast<torch::optim::RMSpropOptions&>(optimizer->param_groups()[0].options());
if (opt.lr() < 1.0e-5) return;
opt.lr(opt.lr()*alp);
}
if (optimizer_name == "adagrad")
{
auto& opt = static_cast<torch::optim::AdagradOptions&>(optimizer->param_groups()[0].options());
if (opt.lr() < 1.0e-5) return;
opt.lr(opt.lr()*alp);
}
}
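// Usage sketch (illustrative): decay the Adam learning rate by half, assuming
// an existing optimizer pointer; the names below are hypothetical.
//
//   std::string name = "adam";
//   cpp_torch::optimizer_lr_chg(name, optimizer_ptr, 0.5f);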
template <
typename Model>
class network_torch
{
std::vector<float_t> Tolerance_Set;
float loss_value = 0.0;
float clip_grad_value = 0.0;
bool early_stopping = false;
int patience = 100;
public:
int in_channels = 1;
int in_H = 1;
int in_W = 1;
int out_channels = 1;
int out_H = 1;
int out_W = 1;
tiny_dnn::timer time_measurement;
torch::Device device;
Model model;
std::string optimizer_name;
/**
* @param model_  model of neural networks
* @param device_ device type (kCPU, kCUDA)
*/
network_torch(Model& model_, torch::Device device_)
:model(model_), device(device_)
{
try
{
model.get()->to(device);
}
catch (std::exception& e)
{
printf("%s\n", e.what());
exit(0);
}
}
inline void set_early_stopping(bool flag, int patience_=100)
{
early_stopping = flag;
patience = patience_;
}
inline bool get_early_stopping()
{
return early_stopping;
}
inline void set_clip_grad_norm(float v)
{
clip_grad_value = v;
}
inline float get_clip_grad_norm()
{
return clip_grad_value;
}
inline void input_dim(int c, int w, int h)
{
in_channels = c;
in_H = h;
in_W = w;
}
inline void output_dim(int c, int w, int h)
{
out_channels = c;
out_H = h;
out_W = w;
}
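// Usage sketch (illustrative, hypothetical values): for MNIST-like data one
// might declare
//   nn.input_dim(1, 28, 28);   // 1 channel, 28 x 28 inputs
//   nn.output_dim(1, 1, 10);   // 10-way one-hot output
// out_data_size() would then be 10.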
bool classification = false;
bool batch_shuffle = true;
bool pre_make_batch = true;
bool stop_training_ = false;
/**
* request to finish an ongoing training
*
* It is safe to test the current network performance in @a
* on_batch_enumerate
* and
* @a on_epoch_enumerate callbacks during training.
*/
inline void stop_ongoing_training() { stop_training_ = true; }
/**
* @param images array of input data
* @param batch_x input data batch
*/
inline void generate_BATCH(
std::vector<torch::Tensor> &images,
std::vector< torch::Tensor>& batch_x
)
{
bool shuffle = batch_shuffle;
const int batchNum = (int64_t)((float)images.size() / (float)kTrainBatchSize + 0.5);
batch_x = std::vector< torch::Tensor>(batchNum);
std::random_device rnd;
const unsigned seed0 = rnd();
#pragma omp parallel for
for (int batch_idx = 0; batch_idx < batchNum; batch_idx++)
{
// per-iteration engine: a single std::mt19937 shared across OpenMP threads
// would be a data race
std::mt19937 mt(seed0 + (unsigned)batch_idx);
std::uniform_int_distribution<> rand_index(0, (int)images.size() - 1);
std::vector<int> index(kTrainBatchSize);
if (shuffle)
{
for (int k = 0; k < kTrainBatchSize; k++)
{
index[k] = rand_index(mt);
}
}
else
{
for (int k = 0; k < kTrainBatchSize; k++)
{
index[k] = (batch_idx*kTrainBatchSize + k) % images.size();
}
}
get_BATCH(images, batch_x[batch_idx], kTrainBatchSize, index);
if (tensor_flatten_size(batch_x[batch_idx]) < kTrainBatchSize*in_channels*in_H*in_W)
{
dump_dim("batch_x", batch_x[batch_idx]);
std::cout << tensor_flatten_size(batch_x[batch_idx])
<< " < " << kTrainBatchSize << "*" << in_channels << "*"
<< in_H << "* " << in_W << "="
<< kTrainBatchSize*in_channels*in_H*in_W << std::endl;
printf("tensor size error.\n"); fflush(stdout);
throw error_exception("tensor size error.");
}
batch_x[batch_idx] = batch_x[batch_idx].view({ kTrainBatchSize, in_channels, in_H, in_W });
}
}
/**
* @param images array of input data
* @param labels array of labels output
* @param batch_x input data batch
* @param batch_y output data batch
*/
inline void generate_BATCH(
std::vector<torch::Tensor> &images,
std::vector<torch::Tensor> &labels,
std::vector< torch::Tensor>& batch_x,
std::vector< torch::Tensor>& batch_y
)
{
bool shuffle = batch_shuffle;
int batch_tmp = kTrainBatchSize;
if (batch_tmp > images.size())
{
batch_tmp = images.size();
}
//printf("%d -> lost:%d\n", images.size(), images.size() % kTrainBatchSize);
//for (int i = batch_tmp; i >= 2; i--)
//{
// if (images.size() % i == 0)
// {
// batch_tmp = i;
// break;
// }
//}
//printf("Please change:kTrainBatchSize:%d -> %d\n", kTrainBatchSize, batch_tmp);
const int batchNum = (int64_t)((float)images.size() / (float)kTrainBatchSize + 0.5);
batch_x = std::vector< torch::Tensor>(batchNum);
batch_y = std::vector< torch::Tensor>(batchNum);
std::random_device rnd;
const unsigned seed0 = rnd();
#pragma omp parallel for
for (int batch_idx = 0; batch_idx < batchNum; batch_idx++)
{
// per-iteration engine: a single std::mt19937 shared across OpenMP threads
// would be a data race
std::mt19937 mt(seed0 + (unsigned)batch_idx);
std::uniform_int_distribution<> rand_index(0, (int)images.size() - 1);
std::vector<int> index(kTrainBatchSize);
if (shuffle)
{
for (int k = 0; k < kTrainBatchSize; k++)
{
index[k] = rand_index(mt);
}
}
else
{
for (int k = 0; k < kTrainBatchSize; k++)
{
index[k] = (batch_idx*kTrainBatchSize + k) % images.size();
}
}
get_BATCH(images, labels, batch_x[batch_idx], batch_y[batch_idx], kTrainBatchSize, index);
if (tensor_flatten_size(batch_x[batch_idx]) < kTrainBatchSize*in_channels*in_H*in_W)
{
dump_dim("batch_x", batch_x[batch_idx]);
std::cout << tensor_flatten_size(batch_x[batch_idx])
<< " < " << kTrainBatchSize << "*" << in_channels << "*"
<< in_H << "* " << in_W << "="
<<kTrainBatchSize*in_channels*in_H*in_W << std::endl;
printf("tensor size error.\n"); fflush(stdout);
throw error_exception("tensor size error.");
}
if (tensor_flatten_size(batch_y[batch_idx]) < kTrainBatchSize*out_channels*out_H*out_W)
{
dump_dim("batch_y", batch_y[batch_idx]);
std::cout << tensor_flatten_size(batch_y[batch_idx])
<< " < " << kTrainBatchSize << "*" << out_channels << "*"
<< out_H << "* " << out_W << "="
<< kTrainBatchSize*out_channels*out_H*out_W << std::endl;
printf("tensor size error.\n"); fflush(stdout);
throw error_exception("tensor size error.");
}
batch_x[batch_idx] = batch_x[batch_idx].view({ kTrainBatchSize, in_channels, in_H, in_W });
batch_y[batch_idx] = batch_y[batch_idx].view({ kTrainBatchSize, out_channels, out_H, out_W });
}
}
/**
* @param optimizer optimizing algorithm for training
* @param images array of input data
* @param labels array of labels output
* @param kTrainBatchSize size of each mini-batch
* @param kNumberOfEpochs number of training epochs
* @param on_batch_enumerate callback invoked after each mini-batch
* @param on_epoch_enumerate callback invoked after each epoch
*/
bool fit(
torch::optim::Optimizer* optimizer,
std::vector<torch::Tensor> &images,
std::vector<torch::Tensor> &labels,
int kTrainBatchSize,
int kNumberOfEpochs,
std::function <void(void)> on_batch_enumerate = {},
std::function <void(void)> on_epoch_enumerate = {}
)
{
if (images.size() != labels.size()) {
return false;
}
if (images.size() < kTrainBatchSize || labels.size() < kTrainBatchSize) {
return false;
}
time_measurement.start();
int batchNum = 0;
std::vector< torch::Tensor> batch_x;
std::vector< torch::Tensor> batch_y;
if (pre_make_batch)
{
generate_BATCH(images, labels, batch_x, batch_y);
batchNum = batch_x.size();
for (int i = 0; i < batchNum; i++)
{
batch_x[i] = batch_x[i].to(device);
batch_y[i] = batch_y[i].to(device);
}
}
std::vector<int> batch_idx_list;
for (int i = 0; i < batchNum; i++)
{
batch_idx_list.push_back(i);
}
std::mt19937 get_rand_mt;
optimizer->zero_grad();
stop_training_ = false;
model.get()->train(true);
int early_stopping_count = 0;
std::vector<float> loss_values;
for (size_t epoch = 0; epoch < kNumberOfEpochs && !stop_training_; ++epoch)
{
if (!pre_make_batch)
{
generate_BATCH(images, labels, batch_x, batch_y);
batchNum = batch_x.size();
for (int i = 0; i < batchNum; i++)
{
batch_x[i] = batch_x[i].to(device);
batch_y[i] = batch_y[i].to(device);
}
// batchNum is 0 before the first epoch in this mode, so (re)build the
// shuffle list here
batch_idx_list.resize(batchNum);
for (int i = 0; i < batchNum; i++) batch_idx_list[i] = i;
}
if (this->batch_shuffle)
{
std::shuffle(batch_idx_list.begin(), batch_idx_list.end(), get_rand_mt);
}
loss_value = 0.0;
float loss_ave = 0.0;
for (int b_idx = 0; b_idx < batchNum && !stop_training_; b_idx++)
{
const int batch_idx = batch_idx_list[b_idx];
torch::Tensor& data = batch_x[batch_idx];
torch::Tensor& targets = batch_y[batch_idx];
//data = data.to(device);
//targets = targets.to(device);
optimizer->zero_grad();
auto output = model.get()->forward(data);
//dump_dim("output", output);
//dump_dim("targets", targets);
targets = targets.reshape_as(output);
torch::Tensor loss;
if (classification)
{
loss = torch::nll_loss(output, targets.argmax(1));
}
else
{
loss = torch::mse_loss(output, targets);
}
if (std::isnan(loss.template item<float_t>()))
{
std::cout << "loss value is nan" << std::endl;
}
AT_ASSERT(!std::isnan(loss.template item<float_t>()));
loss.backward();
if ( fabs(clip_grad_value) > 0.0)
{
torch::nn::utils::clip_grad_norm_(model->parameters(), clip_grad_value);
}
optimizer->step();
loss_value = loss.template item<float_t>();
loss_ave += loss_value;
on_batch_enumerate();
model.get()->train(true);
}
if (stop_training_) break;
loss_value = loss_ave / kTrainBatchSize;
#if 10
if (patience < epoch )
{
//std::cout << "patience " << patience << std::endl;
//std::cout << "epoch " << epoch << std::endl;
//std::cout << "early_stopping_count " << early_stopping_count << std::endl;
if (loss_values.size() > 20)
{
const int n = loss_values.size() - loss_values.size() / 3;
float loss_mean = loss_values[0];
for (int i = 1; i < n; i++)
{
loss_mean += loss_values[i];
}
loss_mean /= n;
float sigma2 = 0;
for (int i = 0; i < n; i++)
{
sigma2 += (loss_values[i] - loss_mean)*(loss_values[i] - loss_mean);
}
sigma2 /= n;
loss_values.clear();
// 2.0 => 97.7%, 1.96 => 95% (normal approximation); sigma2 is the variance,
// so take its square root for the standard error
const float band = 2.0f * std::sqrt(sigma2) / std::sqrt((float)n);
const float u = loss_mean + band;
const float d = loss_mean - band;
std::cout << "u " << u << std::endl;
std::cout << "d " << d << std::endl;
std::cout << "sigma2 " << sigma2 << std::endl;
std::cout << "loss_mean " << loss_mean << std::endl;
std::cout << "loss_value " << loss_value << std::endl;
if (loss_value > u)
{
std::cout << "early_stopping_count " << early_stopping_count << std::endl;
early_stopping_count++;
if (get_early_stopping() && early_stopping_count > 3)
{
std::cout << "early_stopping" << std::endl;
break;
}
optimizer_lr_chg(optimizer_name, optimizer, 0.5);
//early_stopping_count = 0;
//getchar();
//auto &options = static_cast<torch::optim::OptimizerOptions &>(optimizer->param_groups()[0].options());
//options.lr(options.lr() * 0.1);
}
else
{
early_stopping_count = 0;
}
}
loss_values.push_back(loss_value);
}
#endif
on_epoch_enumerate();
model.get()->train(true);
}
time_measurement.stop();
return true;
}
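// Usage sketch (illustrative): training with progress callbacks, assuming an
// already-built network_torch<Net> nn, an optimizer, and tensor data; all
// names below are hypothetical.
//
//   int epoch = 0;
//   nn.fit(&optimizer, images, labels, /*batch*/32, /*epochs*/10,
//       []() { /* per mini-batch */ },
//       [&]() { printf("epoch %d done\n", ++epoch); });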
/**
* @param optimizer optimizing algorithm for training
* @param images array of input data
* @param labels array of labels output
* @param kTrainBatchSize size of each mini-batch
* @param kNumberOfEpochs number of training epochs
* @param on_batch_enumerate callback invoked after each mini-batch
* @param on_epoch_enumerate callback invoked after each epoch
*/
bool fit(
torch::optim::Optimizer* optimizer,
tiny_dnn::tensor_t &images,
tiny_dnn::tensor_t &labels,
int kTrainBatchSize,
int kNumberOfEpochs,
std::function <void(void)> on_batch_enumerate = {},
std::function <void(void)> on_epoch_enumerate = {}
)
{
std::vector<torch::Tensor> images_torch;
std::vector<torch::Tensor> labels_torch;
toTorchTensors(images, images_torch);
toTorchTensors(labels, labels_torch);
return fit(optimizer, images_torch, labels_torch, kTrainBatchSize, kNumberOfEpochs, on_batch_enumerate, on_epoch_enumerate);
}
/**
* @param optimizer optimizing algorithm for training
* @param images array of input data
* @param class_labels array of class label ids (0-origin); converted internally to one-hot vectors
* @param kTrainBatchSize size of each mini-batch
* @param kNumberOfEpochs number of training epochs
* @param on_batch_enumerate callback invoked after each mini-batch
* @param on_epoch_enumerate callback invoked after each epoch
*/
bool train(
torch::optim::Optimizer* optimizer,
tiny_dnn::tensor_t &images,
std::vector<tiny_dnn::label_t> &class_labels,
int kTrainBatchSize,
int kNumberOfEpochs,
std::function <void(void)> on_batch_enumerate = {},
std::function <void(void)> on_epoch_enumerate = {}
)
{
std::vector<tiny_dnn::vec_t> one_hot_vec;
label2vec(class_labels, one_hot_vec);
std::vector<torch::Tensor> images_torch;
std::vector<torch::Tensor> labels_torch;
toTorchTensors(images, images_torch);
toTorchTensors(one_hot_vec, labels_torch);
return fit(optimizer, images_torch, labels_torch, kTrainBatchSize, kNumberOfEpochs, on_batch_enumerate, on_epoch_enumerate);
}
bool test(
std::vector<torch::Tensor> &images,
std::vector<torch::Tensor> &labels,
int kTestBatchSize
)
{
if (images.size() != labels.size()) {
return false;
}
if (images.size() < kTestBatchSize || labels.size() < kTestBatchSize) {
return false;
}
//torch::NoGradGuard no_grad;
//model->eval();
model.get()->train(false);
float loss_ave = 0.0;
int correct = 0;
//int batch_tmp = kTestBatchSize;
//if (batch_tmp > images.size())
//{
// batch_tmp = images.size();
//}
//printf("%d -> lost:%d\n", images.size(), images.size() % kTestBatchSize);
//for (int i = batch_tmp; i >= 2; i--)
//{
// if (images.size() % i == 0)
// {
// batch_tmp = i;
// break;
// }
//}
//printf("Please change:kTestBatchSize:%d -> %d\n", kTestBatchSize, batch_tmp);
int testNum = images.size() / kTestBatchSize;
if (testNum == 0)
{
printf("input size < test batch size.\n"); fflush(stdout);
throw error_exception("input size < test batch size");
}
for (size_t test = 0; test < testNum; ++test)
{
torch::Tensor batch_x;
torch::Tensor batch_y;
std::vector<int> index(kTestBatchSize);
for (int k = 0; k < kTestBatchSize; k++)
{
index[k] = (kTestBatchSize * test + k) % images.size();
}
get_BATCH(images, labels, batch_x, batch_y, kTestBatchSize, index);
torch::Tensor data = batch_x.view({ kTestBatchSize, in_channels, in_H, in_W });
torch::Tensor targets = batch_y.view({ kTestBatchSize, out_channels, out_H, out_W });
data = data.to(device);
targets = targets.to(device);
torch::Tensor output = model.get()->forward(data);
targets = targets.reshape_as(output);
torch::Tensor loss;
if (classification)
{
loss = torch::nll_loss(output, targets.argmax(1));
}
else
{
loss = torch::mse_loss(output, targets);
}
AT_ASSERT(!std::isnan(loss.template item<float_t>()));
loss_value = loss.template item<float_t>();
loss_ave += loss_value;
if (classification)
{
auto pred = output.argmax(1);
correct += pred.eq(targets.argmax(1)).sum().template item<int64_t>();
}
// if (classification_one_hot_vector)
// {
//#if 1
// auto pred = output.argmax(1);
// correct += pred.eq(targets.argmax(1)).sum().template item<int64_t>();
//#else
//#pragma omp parallel for
// for (int k = 0; k < kTestBatchSize; k++)
// {
// correct += (vec_max_index(output[k]) == vec_max_index(targets[k])) ? 1 : 0;
//
// {
// //std::vector<tiny_dnn::tensor_t>& x = toTensor_t(output[k], 1, 1, 1, out_data_size());
// //std::vector<tiny_dnn::tensor_t>& y = toTensor_t(targets[k], 1, 1, 1, out_data_size());
// //AT_ASSERT(vec_max_index(x[0][0])== vec_max_index(output[k]));
// //AT_ASSERT(vec_max_index(y[0][0]) == vec_max_index(targets[k]));
//
// //tiny_dnn::tensor_t& x = toTensor_t(output[k], 1, 1, out_data_size());
// //tiny_dnn::tensor_t& y = toTensor_t(targets[k], 1, 1, out_data_size());
// //AT_ASSERT(vec_max_index(x[0]) == vec_max_index(output[k]));
// //AT_ASSERT(vec_max_index(y[0]) == vec_max_index(targets[k]));
//
// //tiny_dnn::vec_t& x = toTensor_t(output[k], out_data_size());
// //tiny_dnn::vec_t& y = toTensor_t(targets[k], out_data_size());
// //AT_ASSERT(vec_max_index(x) == vec_max_index(output[k]));
// //AT_ASSERT(vec_max_index(y) == vec_max_index(targets[k]));
// }
// }
//#endif
// }
}
if (classification)
{
std::printf(" Accuracy: %.3f%% Loss: %.3f\n", 100.0*static_cast<float_t>(correct) / images.size(), loss_ave / testNum);
}
else
{
std::printf("Loss: %.3f\n", loss_ave / testNum);
}
return true;
}
/**
* @param images array of input data
* @param labels array of output data
* @param kTestBatchSize size of each mini-batch
*/
bool test(
tiny_dnn::tensor_t &images,
tiny_dnn::tensor_t &labels,
int kTestBatchSize
)
{
std::vector<torch::Tensor> images_torch;
std::vector<torch::Tensor> labels_torch;
toTorchTensors(images, images_torch);
toTorchTensors(labels, labels_torch);
return test(images_torch, labels_torch, kTestBatchSize);
}
/**
* @param images array of input data
* @param class_labels array of class label ids; converted internally to one-hot vectors
* @param kTestBatchSize size of each mini-batch
*/
bool test(
tiny_dnn::tensor_t &images,
std::vector<tiny_dnn::label_t> &class_labels,
int kTestBatchSize
)
{
std::vector<tiny_dnn::vec_t> one_hot_vec;
label2vec(class_labels, one_hot_vec);
std::vector<torch::Tensor> images_torch;
std::vector<torch::Tensor> labels_torch;
toTorchTensors(images, images_torch);
toTorchTensors(one_hot_vec, labels_torch);
return test(images_torch, labels_torch, kTestBatchSize);
}
torch::Tensor fprop(torch::Tensor &in) {
torch::NoGradGuard no_grad;
model->eval();
model.get()->train(false);
return model.get()->forward(in);
}
/**
* executes forward-propagation and returns output
**/
inline torch::Tensor predict(torch::Tensor& X)
{
//torch::NoGradGuard no_grad;
//model->eval();
model.get()->train(false);
torch::Tensor y = model.get()->forward(X.to(device));
return y;
}
/**
* executes forward-propagation and returns output
**/
inline std::vector<tiny_dnn::tensor_t> predict(torch::Tensor& X, const int batch)
{
//torch::NoGradGuard no_grad;
//model->eval();
model.get()->train(false);
torch::Tensor y = model.get()->forward(X.to(device));
std::vector<tiny_dnn::tensor_t> t;
t = toTensor_t(y, batch, out_channels, out_H, out_W);
return t;
}
/**
* executes forward-propagation and returns output
**/
inline std::vector<tiny_dnn::vec_t> predict(std::vector<tiny_dnn::vec_t>& X, int batch = 1)
{
//printf("X.size()=%d\n", X.size()); fflush(stdout);
std::vector<tiny_dnn::vec_t> out;
int batch_n = X.size() / batch;
if (X.size() < batch || batch == 1)
{
for (int i = 0; i < X.size(); i++)
{
out.emplace_back(predict(X[i]));
}
return out;
}
//torch::NoGradGuard no_grad;
//model->eval();
model.get()->train(false);
std::vector<torch::Tensor> n_batch_images(batch_n);
#pragma omp parallel for
for (int i = 0; i < batch_n; i++)
{
torch::Tensor images_torch = toTorchTensors(X[i*batch]).view({ 1, in_channels, in_H, in_W }).to(device);
auto batch_images = images_torch;
for (int j = 1; j < batch; j++)
{
torch::Tensor images_torch = toTorchTensors(X[i*batch + j]).view({ 1, in_channels, in_H, in_W }).to(device);
batch_images = torch::cat({ batch_images, images_torch }, 0);
}
n_batch_images[i] = batch_images;
}
for (int k = 0; k < batch_n; k++)
{
//cpp_torch::dump_dim("batch_images", batch_images);
torch::Tensor y = model.get()->forward(n_batch_images[k]);
y = y.view({ batch, out_channels, out_H, out_W });
for (int i = 0; i < batch; i++)
{
//cpp_torch::dump_dim("torch::Tensor y", y);
//std::cout << " " << out_data_size() << std::endl;
torch::Tensor yi = y[i];
tiny_dnn::vec_t t = toTensor_t(yi, out_data_size());
out.emplace_back(t);
}
}
int n = X.size() % batch;
//printf("n=%d\n", n); fflush(stdout);
if (n > 0 && X.size() > batch)
{
for (int i = X.size() - n; i < X.size(); i++)
{
out.emplace_back(predict(X[i]));
}
}
//printf("out.size()=%d\n", out.size()); fflush(stdout);
return out;
}
/**
* executes forward-propagation and returns output
**/
inline tiny_dnn::vec_t predict(tiny_dnn::vec_t& X)
{
//torch::NoGradGuard no_grad;
//model->eval();
model.get()->train(false);
torch::Tensor images_torch = toTorchTensors(X).view({ 1, in_channels, in_H, in_W }).to(device);
torch::Tensor y = model.get()->forward(images_torch);
//cpp_torch::dump_dim("torch::Tensor y", y);
//std::cout << " " << out_data_size() << std::endl;
tiny_dnn::vec_t t = toTensor_t(y, out_data_size());
return t;
}
/**
* executes forward-propagation and returns output
**/
inline tiny_dnn::label_t predict_label(tiny_dnn::vec_t& X)
{
//torch::NoGradGuard no_grad;
//model->eval();
model.get()->train(false);
torch::Tensor images_torch = toTorchTensors(X).view({ 1, in_channels, in_H, in_W }).to(device);
torch::Tensor y = model.get()->forward(images_torch);
tiny_dnn::vec_t t = toTensor_t(y, out_data_size());
return vec_max_index(t);
}
inline void label2vec(const std::vector<tiny_dnn::label_t>& labels, std::vector<tiny_dnn::vec_t>& vec)
{
const size_t outdim = out_data_size();
vec.clear();
vec.resize(labels.size());
const size_t sz = labels.size();
#pragma omp parallel for
for (int i = 0; i < sz; i++)
{
tiny_dnn::vec_t t(outdim, 0);
t[labels[i]] = 1.0;
vec[i] = t;
}
}
inline void label2vec(const tiny_dnn::label_t& labels, tiny_dnn::vec_t& vec)
{
const size_t outdim = out_data_size();
tiny_dnn::vec_t t(outdim, 0);
t[labels] = 1.0;
vec = t;
}
inline tiny_dnn::label_t vec_max_index(torch::Tensor &out) {
return tiny_dnn::label_t(out.view({ out_data_size() }).argmax(0).template item<float_t>());
}
inline tiny_dnn::label_t vec_max_index(tiny_dnn::vec_t &out) {
return tiny_dnn::label_t(max_index(out));
}
inline tiny_dnn::label_t vec_max_index(tiny_dnn::tensor_t &out) {
return tiny_dnn::label_t(max_index(out[0]));
}
float_t get_loss(std::vector<tiny_dnn::vec_t> &in, std::vector<tiny_dnn::label_t> &t, int batchSize) {
std::vector<tiny_dnn::vec_t> vec;
label2vec(t, vec);
return get_loss(in, vec, batchSize);
}
float_t get_loss( std::vector<tiny_dnn::vec_t> &in, std::vector<tiny_dnn::vec_t> &t, int BatchSize) {
float_t sum_loss = float_t(0);
std::vector<torch::Tensor> images;
std::vector<torch::Tensor> labels;
//printf("in:%d\n", in.size());
toTorchTensors(in, images);
toTorchTensors(t, labels);
//int batch_tmp = BatchSize;
//if (batch_tmp > in.size())
//{
// batch_tmp = in.size();
//}
//printf("lost:%d\n", in.size() % BatchSize);
//for (int i = batch_tmp; i >= 2; i--)
//{
// if (images.size() % i == 0)
// {
// batch_tmp = i;
// break;
// }
//}
//printf("change:BatchSize:%d -> %d\n", BatchSize, batch_tmp);
//BatchSize = batch_tmp;
const int batchNum = (int)((float)in.size() / (float)BatchSize + 0.5f);
if (batchNum == 0)
{
printf("input size:%d BatchSize:%d\n", (int)in.size(), BatchSize);
throw error_exception("input size < Batch Size");
}
std::vector< torch::Tensor> batch_x(batchNum);
std::vector< torch::Tensor> batch_y(batchNum);
//torch::NoGradGuard no_grad;
//model->eval();
model.get()->train(false);
std::vector<float_t> loss_list(in.size(), 0.0);
//#pragma omp parallel for
for (int i = 0; i < batchNum; i++) {
//if (!pre_make_batch)
{
std::vector<int> index(BatchSize);
for (int k = 0; k < BatchSize; k++)
{
index[k] = (i* BatchSize + k) % images.size();
}
get_BATCH(images, labels, batch_x[i], batch_y[i], BatchSize, index);
batch_x[i] = batch_x[i].view({ BatchSize, in_channels, in_H, in_W });
batch_y[i] = batch_y[i].view({ BatchSize, out_channels, out_H, out_W });
}
torch::Tensor input = batch_x[i].to(device);
torch::Tensor targets = batch_y[i].to(device);
torch::Tensor predicted = predict(input).to(device);
//dump_dim(std::string("predicted"), predicted);
//dump_dim(std::string("targets"), targets);
torch::Tensor loss;
if (classification)
{
loss = torch::nll_loss(predicted, targets.view_as(predicted).argmax(1));
}
else
{
loss = torch::mse_loss(predicted.view_as(targets), targets);
}
AT_ASSERT(!std::isnan(loss.template item<float_t>()));
//dump_dim(std::string("loss"), loss);
//std::cout << loss << std::endl;
loss_list[i] = loss.template item<float_t>();
}
for (size_t i = 0; i < batchNum; i++) {
sum_loss += loss_list[i];
}
return sum_loss/ BatchSize;
}
void set_tolerance(const float max_tol, const float min_tol, int div = 5)
{
if (div < 3) div = 3;
Tolerance_Set.resize(div);
for (int i = 0; i < div; i++)
{
Tolerance_Set[i] = (max_tol + i*(min_tol - max_tol) / (div - 1.0));
}
}
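// Worked example (illustrative): set_tolerance(0.1f, 0.01f, 4) fills
// Tolerance_Set with the evenly spaced thresholds { 0.1, 0.07, 0.04, 0.01 }.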
std::vector<float_t>& get_tolerance()
{
return Tolerance_Set;
}
/*
* output vector output[0..tolerance_set.size()-1]=num_success, output[tolerance_set.size()]=image of size,
*/
std::vector<int> get_accuracy(tiny_dnn::tensor_t& images, tiny_dnn::tensor_t& labels, std::vector<float_t>& tolerance_set)
{
std::vector<int> result(tolerance_set.size()+1);
if (images.size() == 0)
{
return result;
}
result[tolerance_set.size()] = images.size();
for (int i = 0; i < images.size(); i++)
{
tiny_dnn::vec_t predict_y = predict(images[i]);
const tiny_dnn::vec_t& actual = labels[i];
AT_ASSERT(predict_y.size() == actual.size());
float sum = 0.0;
for (int k = 0; k < predict_y.size(); k++)
{
sum += (predict_y[k] - actual[k])*(predict_y[k] - actual[k]);
}
sum /= predict_y.size();
for (int j = 0; j < tolerance_set.size(); j++)
{
if (sum < tolerance_set[j])
{
result[j]++;
}
}
}
return result;
}
tiny_dnn::result get_accuracy( tiny_dnn::tensor_t& images, tiny_dnn::tensor_t& labels, int batch = 1)
{
tiny_dnn::result result;
if (images.size() == 0)
{
result.num_total = 1;
return result;
}
const size_t sz = images.size();
#if 10
std::vector< tiny_dnn::label_t> predicted_list(sz, 0);
std::vector< tiny_dnn::label_t>actual_list(sz, 0);
std::vector<tiny_dnn::vec_t> n_predict_y = predict(images, batch);
#pragma omp parallel for
for (int i = 0; i < sz; i++)
{
//tiny_dnn::vec_t& predict_y = predict(images[i]);
tiny_dnn::vec_t& predict_y = n_predict_y[i];
predicted_list[i] = vec_max_index(predict_y);
actual_list[i] = vec_max_index(labels[i]);
}
for (int i = 0; i < sz; i++)
{
if (predicted_list[i] == actual_list[i]) result.num_success++;
result.num_total++;
result.confusion_matrix[predicted_list[i]][actual_list[i]]++;
}
#else
for (int i = 0; i < sz; i++)
{
tiny_dnn::vec_t& predict_y = predict(images[i]);
const tiny_dnn::label_t predicted = vec_max_index(predict_y);
const tiny_dnn::label_t actual = vec_max_index(labels[i]);
if (predicted == actual) result.num_success++;
result.num_total++;
result.confusion_matrix[predicted][actual]++;
}
#endif
return result;
}
std::vector<int> test_tolerance(tiny_dnn::tensor_t& images, tiny_dnn::tensor_t& labels)
{
AT_ASSERT(Tolerance_Set.size() != 0);
return get_accuracy(images, labels, Tolerance_Set);
}
// labels[#] = 0,1,..class-1
tiny_dnn::result get_accuracy(tiny_dnn::tensor_t& images, std::vector <tiny_dnn::label_t>& labels)
{
std::vector<tiny_dnn::vec_t> vec;
label2vec(labels, vec); // one-hot vector
return get_accuracy(images, vec);
}
tiny_dnn::result test(tiny_dnn::tensor_t& images, tiny_dnn::tensor_t& labels)
{
return get_accuracy(images, labels);
}
// labels[#] = 0,1,..class-1
tiny_dnn::result test(tiny_dnn::tensor_t& images, std::vector <tiny_dnn::label_t>& labels)
{
return get_accuracy(images, labels);
}
inline int in_data_size() const
{
return in_channels * in_H*in_W;
}
inline int out_data_size() const
{
return out_channels * out_H*out_W;
}
inline void save(std::string& filename)
{
torch::save(model, filename);
}
inline void load(std::string& filename)
{
try
{
// A model trained and serialized on a CUDA device embeds that device in the
// archive, so load it first and then move it to the requested device
// (the commented-out argument below would be map_location = device).
torch::load(model, filename/*, device*/);
model.get()->to(device);
}
catch (c10::Error& err)
{
printf("load error[%s]\n", err.what());
}
}
};
void print_ConfusionMatrix(tiny_dnn::result& res)
{
//ConfusionMatrix
std::cout << "ConfusionMatrix:" << std::endl;
res.print_detail(std::cout);
std::cout << res.num_success << "/" << res.num_total << std::endl;
res.print_summary(std::cout);
//printf("accuracy:%.3f%%\n", res.accuracy());
}
void print_ConfusionMatrix(std::vector<int>& res, std::vector<float_t>& tol)
{
//ConfusionMatrix
std::cout << "ConfusionMatrix:" << std::endl;
for (int i = 0; i < res.size()-1; i++)
{
printf("tolerance:%.4f %d / %d accuracy:%.3f%%\n", tol[i], res[i], res.back(),
100.0*(float_t)res[i] / (float_t)res.back());
}
}
}
#endif
|
GB_binop__lor_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint8)
// A*D function (colscale): GB (_AxD__lor_uint8)
// D*A function (rowscale): GB (_DxB__lor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint8)
// C=scalar+B GB (_bind1st__lor_uint8)
// C=scalar+B' GB (_bind1st_tran__lor_uint8)
// C=A+scalar GB (_bind2nd__lor_uint8)
// C=A'+scalar GB (_bind2nd_tran__lor_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) || (bij != 0))
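// Worked example (illustrative): LOR on uint8 treats any nonzero value as
// true, so op(0,0) = 0, op(0,3) = 1, op(7,0) = 1, and op(7,3) = 1.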
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT8 || GxB_NO_LOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__lor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lor_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lor_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lor_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lor_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lor_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
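// Note (illustrative): with x = 1 the loop above writes Cx [p] = 1 for every
// entry present in B, since (1 != 0) is true; with x = 0 it reduces to
// Cx [p] = (Bx [p] != 0).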
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lor_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lor_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__exp_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp_fc64_fc64)
// op(A') function: GB (_unop_tran__exp_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = cexp (aij)
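// Worked example (illustrative): cexp (a + b*I) = exp (a) * (cos (b) +
// I*sin (b)), so cexp (0 + M_PI*I) is approximately -1 + 0*I.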
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cexp (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = cexp (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__exp_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = cexp (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = cexp (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__exp_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
_vet.c | /* Generated by Cython 0.29.13 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [
"/usr/lib/python3/dist-packages/numpy/core/include/numpy/arrayobject.h",
"/usr/lib/python3/dist-packages/numpy/core/include/numpy/ufuncobject.h"
],
"extra_compile_args": [
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
],
"include_dirs": [
"/usr/lib/python3/dist-packages/numpy/core/include"
],
"language": "c",
"name": "pysteps.motion._vet",
"sources": [
"pysteps/motion/_vet.pyx"
]
},
"module_name": "pysteps.motion._vet"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_13"
#define CYTHON_HEX_VERSION 0x001D0DF0
#define CYTHON_FUTURE_DIVISION 1
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
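/* PEP 539 thread-specific storage compatibility shim: on CPython < 3.7
   the Py_tss_* API does not exist, so it is emulated here on top of the
   legacy PyThread_*_key functions from pythread.h. */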
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
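/* PEP 393 flexible string representation: on Python >= 3.3 use the
   KIND/DATA accessors (readying legacy strings on demand where needed);
   otherwise map everything onto the old Py_UNICODE API. */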
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#define PyObject_Unicode PyObject_Str
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
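/* Python 3 dropped unbound methods: binding a function with a NULL self
   just returns the function itself (with a new reference), while
   Python 2 still builds a classic method object via the three-argument
   PyMethod_New. */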
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
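/* If <math.h> provides no NAN macro, synthesize one: filling a float
   with 0xFF bytes yields a (negative) quiet NaN on IEEE-754 targets. */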
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
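/* Central error macro: record the Python source file, Python line and
   C line for traceback reporting, then jump to the function's error
   label. */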
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__pysteps__motion___vet
#define __PYX_HAVE_API__pysteps__motion___vet
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
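/* Bounds check with a single comparison: casting to size_t folds the
   "i >= 0 && i < limit" test into one unsigned compare, since a
   negative i wraps to a huge unsigned value. */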
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Branch-prediction hints: GCC > 2.95 provides __builtin_expect */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
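/* Consumes a pointer to silence "may be used uninitialized" warnings
   without generating any real work. */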
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"pysteps/motion/_vet.pyx",
"__init__.pxd",
"type.pxd",
};
/* BufferFormatStructs.proto */
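/* IS_UNSIGNED relies on integer conversion: (type)-1 wraps to the
   type's maximum value for unsigned types, so comparing it against 0
   detects signedness at compile time. */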
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":777
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":785
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":790
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":802
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":806
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":808
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":811
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":813
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* "pysteps/motion/_vet.pyx":13
* cimport numpy as np
*
* ctypedef np.float64_t float64 # <<<<<<<<<<<<<<
* ctypedef np.int8_t int8
* ctypedef np.intp_t intp
*/
typedef __pyx_t_5numpy_float64_t __pyx_t_7pysteps_6motion_4_vet_float64;
/* "pysteps/motion/_vet.pyx":14
*
* ctypedef np.float64_t float64
* ctypedef np.int8_t int8 # <<<<<<<<<<<<<<
* ctypedef np.intp_t intp
*
*/
typedef __pyx_t_5numpy_int8_t __pyx_t_7pysteps_6motion_4_vet_int8;
/* "pysteps/motion/_vet.pyx":15
* ctypedef np.float64_t float64
* ctypedef np.int8_t int8
* ctypedef np.intp_t intp # <<<<<<<<<<<<<<
*
* from libc.math cimport floor, round
*/
typedef __pyx_t_5numpy_intp_t __pyx_t_7pysteps_6motion_4_vet_intp;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":816
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":817
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* --- Runtime support code (head) --- */
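/* RefNanny is Cython's debug reference-count tracker.  When enabled,
   every INCREF/DECREF is routed through a function table so mismatches
   can be reported; when disabled, the macros collapse to the plain
   Py_INCREF/Py_DECREF (or to nothing for the GOTREF/GIVEREF
   bookkeeping). */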
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* BufferGetAndValidate.proto */
#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\
((obj == Py_None || obj == NULL) ?\
(__Pyx_ZeroBuffer(buf), 0) :\
__Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack))
static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static void __Pyx_ZeroBuffer(Py_buffer* buf);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
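/* Dict version caching (PEP 509): ma_version_tag changes whenever a
   dict is mutated, so a cached lookup result stays valid as long as the
   recorded version still matches.  __PYX_PY_DICT_LOOKUP_IF_MODIFIED
   re-executes LOOKUP only after the dict has changed. */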
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
#define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2)
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* BufferFallbackError.proto */
static void __Pyx_RaiseBufferFallbackError(void);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
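/* C89-compatible static assert: when cond is false the array type
   char[1 - 2*!(cond)] has negative size, which the compiler rejects.
   It is used below to verify that f_localsplus is the trailing member
   of PyFrameObject before computing its offset. */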
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* IterFinish.proto */
static CYTHON_INLINE int __Pyx_IterFinish(void);
/* UnpackItemEndCheck.proto */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2))
#endif
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);
#define __Pyx_PyObject_Dict_GetItem(obj, name)\
(likely(PyDict_CheckExact(obj)) ?\
__Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
#endif
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
__Pyx_ImportType_CheckSize_Error = 0,
__Pyx_ImportType_CheckSize_Warn = 1,
__Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
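/* Traceback support: a sorted cache of synthetic PyCodeObjects keyed by
   source line, searched with __pyx_bisect_code_objects so building
   tracebacks through the same location does not recreate the code
   object each time. */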
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* CIntFromPy.proto */
static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cython' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'libc.math' */
/* Module declarations from 'pysteps.motion._vet' */
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet_float_abs(__pyx_t_7pysteps_6motion_4_vet_float64); /*proto*/
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_min(__pyx_t_7pysteps_6motion_4_vet_intp, __pyx_t_7pysteps_6motion_4_vet_intp); /*proto*/
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_t_7pysteps_6motion_4_vet_intp, __pyx_t_7pysteps_6motion_4_vet_intp); /*proto*/
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64, __pyx_t_7pysteps_6motion_4_vet_float64); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64 = { "float64", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_float64), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8 = { "int8", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_int8), { 0 }, 0, IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_int8) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_int8), 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp = { "intp", NULL, sizeof(__pyx_t_7pysteps_6motion_4_vet_intp), { 0 }, 0, IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_intp) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_7pysteps_6motion_4_vet_intp), 0 };
#define __Pyx_MODULE_NAME "pysteps.motion._vet"
extern int __pyx_module_is_main_pysteps__motion___vet;
int __pyx_module_is_main_pysteps__motion___vet = 0;
/* Implementation of 'pysteps.motion._vet' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_zip;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_builtin_ImportError;
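/* String table: every Python identifier and literal the module touches
   is listed once below and created (and optionally interned) at module
   init by __Pyx_InitStrings, so later lookups reuse the same object. */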
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_l[] = "l";
static const char __pyx_k_m[] = "m";
static const char __pyx_k_x[] = "x";
static const char __pyx_k_y[] = "y";
static const char __pyx_k_dx[] = "dx";
static const char __pyx_k_dy[] = "dy";
static const char __pyx_k_l0[] = "l0";
static const char __pyx_k_l1[] = "l1";
static const char __pyx_k_ll[] = "ll";
static const char __pyx_k_m0[] = "m0";
static const char __pyx_k_m1[] = "m1";
static const char __pyx_k_mm[] = "mm";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_nx[] = "nx";
static const char __pyx_k_ny[] = "ny";
static const char __pyx_k_xy[] = "xy";
static const char __pyx_k_f00[] = "f00";
static const char __pyx_k_f01[] = "f01";
static const char __pyx_k_f10[] = "f10";
static const char __pyx_k_f11[] = "f11";
static const char __pyx_k_l_i[] = "l_i";
static const char __pyx_k_m_j[] = "m_j";
static const char __pyx_k_sum[] = "sum";
static const char __pyx_k_zip[] = "zip";
static const char __pyx_k_axis[] = "axis";
static const char __pyx_k_full[] = "full";
static const char __pyx_k_int8[] = "int8";
static const char __pyx_k_intp[] = "intp";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mask[] = "mask";
static const char __pyx_k_mean[] = "mean";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_warp[] = "_warp";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_i_max[] = "i_max";
static const char __pyx_k_i_min[] = "i_min";
static const char __pyx_k_i_sec[] = "i_sec";
static const char __pyx_k_image[] = "image";
static const char __pyx_k_j_max[] = "j_max";
static const char __pyx_k_j_min[] = "j_min";
static const char __pyx_k_j_sec[] = "j_sec";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_arange[] = "arange";
static const char __pyx_k_buffer[] = "buffer";
static const char __pyx_k_counts[] = "counts";
static const char __pyx_k_df_dx2[] = "df_dx2";
static const char __pyx_k_df_dy2[] = "df_dy2";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_unique[] = "unique";
static const char __pyx_k_x_ceil[] = "x_ceil";
static const char __pyx_k_y_ceil[] = "y_ceil";
static const char __pyx_k_df_dxdy[] = "df_dxdy";
static const char __pyx_k_float64[] = "float64";
static const char __pyx_k_i_shift[] = "i_shift";
static const char __pyx_k_j_shift[] = "j_shift";
static const char __pyx_k_reshape[] = "reshape";
static const char __pyx_k_x_float[] = "x_float";
static const char __pyx_k_x_floor[] = "x_floor";
static const char __pyx_k_x_guess[] = "x_guess";
static const char __pyx_k_y_float[] = "y_float";
static const char __pyx_k_y_floor[] = "y_floor";
static const char __pyx_k_y_guess[] = "y_guess";
static const char __pyx_k_gradient[] = "gradient";
static const char __pyx_k_new_image[] = "new_image";
static const char __pyx_k_residuals[] = "residuals";
static const char __pyx_k_x_max_int[] = "x_max_int";
static const char __pyx_k_x_sectors[] = "x_sectors";
static const char __pyx_k_y_max_int[] = "y_max_int";
static const char __pyx_k_y_sectors[] = "y_sectors";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_morph_mask[] = "morph_mask";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_grad_smooth[] = "grad_smooth";
static const char __pyx_k_input_image[] = "input_image";
static const char __pyx_k_interp_coef[] = "interp_coef";
static const char __pyx_k_sector_area[] = "sector_area";
static const char __pyx_k_smooth_gain[] = "smooth_gain";
static const char __pyx_k_x_max_float[] = "x_max_float";
static const char __pyx_k_y_max_float[] = "y_max_float";
static const char __pyx_k_RuntimeError[] = "RuntimeError";
static const char __pyx_k_displacement[] = "displacement";
static const char __pyx_k_morphed_mask[] = "morphed_mask";
static const char __pyx_k_return_index[] = "return_index";
static const char __pyx_k_x_image_size[] = "x_image_size";
static const char __pyx_k_y_image_size[] = "y_image_size";
static const char __pyx_k_cost_function[] = "_cost_function";
static const char __pyx_k_gradient_data[] = "_gradient_data";
static const char __pyx_k_morphed_image[] = "morphed_image";
static const char __pyx_k_return_counts[] = "return_counts";
static const char __pyx_k_x_sector_size[] = "x_sector_size";
static const char __pyx_k_y_sector_size[] = "y_sector_size";
static const char __pyx_k_grad_residuals[] = "grad_residuals";
static const char __pyx_k_template_image[] = "template_image";
static const char __pyx_k_gradient_values[] = "gradient_values";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_smoothness_penalty[] = "smoothness_penalty";
static const char __pyx_k_pysteps_motion__vet[] = "pysteps.motion._vet";
static const char __pyx_k_sector_displacement[] = "sector_displacement";
static const char __pyx_k_pysteps_motion__vet_pyx[] = "pysteps/motion/_vet.pyx";
static const char __pyx_k_inloop_smoothness_penalty[] = "inloop_smoothness_penalty";
static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static const char __pyx_k_Error_computing_cost_function[] = "Error computing cost function.\n";
static const char __pyx_k_Cython_module_for_morphing_and[] = "\nCython module for the morphing and cost function implementations used\nin the Variational Echo Tracking Algorithm\n";
static const char __pyx_k_The_number_of_sectors_in_x_axis[] = "The number of sectors in the x axis (axis=0) does not divide the image size";
static const char __pyx_k_The_number_of_sectors_in_y_axis[] = "The number of sectors in the y axis (axis=1) does not divide the image size";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_kp_u_Error_computing_cost_function;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_kp_u_The_number_of_sectors_in_x_axis;
static PyObject *__pyx_kp_u_The_number_of_sectors_in_y_axis;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_arange;
static PyObject *__pyx_n_s_axis;
static PyObject *__pyx_n_s_buffer;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_cost_function;
static PyObject *__pyx_n_s_counts;
static PyObject *__pyx_n_s_df_dx2;
static PyObject *__pyx_n_s_df_dxdy;
static PyObject *__pyx_n_s_df_dy2;
static PyObject *__pyx_n_s_displacement;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_dx;
static PyObject *__pyx_n_s_dy;
static PyObject *__pyx_n_s_f00;
static PyObject *__pyx_n_s_f01;
static PyObject *__pyx_n_s_f10;
static PyObject *__pyx_n_s_f11;
static PyObject *__pyx_n_s_float64;
static PyObject *__pyx_n_u_float64;
static PyObject *__pyx_n_s_full;
static PyObject *__pyx_n_s_grad_residuals;
static PyObject *__pyx_n_s_grad_smooth;
static PyObject *__pyx_n_s_gradient;
static PyObject *__pyx_n_s_gradient_data;
static PyObject *__pyx_n_s_gradient_values;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_i_max;
static PyObject *__pyx_n_s_i_min;
static PyObject *__pyx_n_s_i_sec;
static PyObject *__pyx_n_s_i_shift;
static PyObject *__pyx_n_s_image;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_inloop_smoothness_penalty;
static PyObject *__pyx_n_s_input_image;
static PyObject *__pyx_n_s_int8;
static PyObject *__pyx_n_s_interp_coef;
static PyObject *__pyx_n_s_intp;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_j_max;
static PyObject *__pyx_n_s_j_min;
static PyObject *__pyx_n_s_j_sec;
static PyObject *__pyx_n_s_j_shift;
static PyObject *__pyx_n_s_l;
static PyObject *__pyx_n_s_l0;
static PyObject *__pyx_n_s_l1;
static PyObject *__pyx_n_s_l_i;
static PyObject *__pyx_n_s_ll;
static PyObject *__pyx_n_s_m;
static PyObject *__pyx_n_s_m0;
static PyObject *__pyx_n_s_m1;
static PyObject *__pyx_n_s_m_j;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_mask;
static PyObject *__pyx_n_s_mean;
static PyObject *__pyx_n_s_mm;
static PyObject *__pyx_n_s_morph_mask;
static PyObject *__pyx_n_s_morphed_image;
static PyObject *__pyx_n_s_morphed_mask;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_new_image;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_nx;
static PyObject *__pyx_n_s_ny;
static PyObject *__pyx_n_s_pysteps_motion__vet;
static PyObject *__pyx_kp_s_pysteps_motion__vet_pyx;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reshape;
static PyObject *__pyx_n_s_residuals;
static PyObject *__pyx_n_s_return_counts;
static PyObject *__pyx_n_s_return_index;
static PyObject *__pyx_n_s_sector_area;
static PyObject *__pyx_n_s_sector_displacement;
static PyObject *__pyx_n_s_smooth_gain;
static PyObject *__pyx_n_s_smoothness_penalty;
static PyObject *__pyx_n_s_sum;
static PyObject *__pyx_n_s_template_image;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_unique;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_warp;
static PyObject *__pyx_n_s_x;
static PyObject *__pyx_n_s_x_ceil;
static PyObject *__pyx_n_s_x_float;
static PyObject *__pyx_n_s_x_floor;
static PyObject *__pyx_n_s_x_guess;
static PyObject *__pyx_n_s_x_image_size;
static PyObject *__pyx_n_s_x_max_float;
static PyObject *__pyx_n_s_x_max_int;
static PyObject *__pyx_n_s_x_sector_size;
static PyObject *__pyx_n_s_x_sectors;
static PyObject *__pyx_n_s_xy;
static PyObject *__pyx_n_s_y;
static PyObject *__pyx_n_s_y_ceil;
static PyObject *__pyx_n_s_y_float;
static PyObject *__pyx_n_s_y_floor;
static PyObject *__pyx_n_s_y_guess;
static PyObject *__pyx_n_s_y_image_size;
static PyObject *__pyx_n_s_y_max_float;
static PyObject *__pyx_n_s_y_max_int;
static PyObject *__pyx_n_s_y_sector_size;
static PyObject *__pyx_n_s_y_sectors;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_n_s_zip;
static PyObject *__pyx_pf_7pysteps_6motion_4_vet__warp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_image, PyArrayObject *__pyx_v_mask, PyArrayObject *__pyx_v_displacement, int __pyx_v_gradient); /* proto */
static PyObject *__pyx_pf_7pysteps_6motion_4_vet_2_cost_function(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_sector_displacement, PyArrayObject *__pyx_v_template_image, PyArrayObject *__pyx_v_input_image, PyArrayObject *__pyx_v_mask, float __pyx_v_smooth_gain, int __pyx_v_gradient); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static PyObject *__pyx_float_1_0;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_2;
static PyObject *__pyx_int_4;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_slice__3;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_codeobj__14;
static PyObject *__pyx_codeobj__16;
/* Late includes */
/* "pysteps/motion/_vet.pyx":21
* cimport numpy as np
*
* cdef inline float64 float_abs(float64 a) nogil: return a if a > 0. else -a # <<<<<<<<<<<<<<
* """ Return the absolute value of a float """
*
*/
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet_float_abs(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_a) {
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_t_1;
if (((__pyx_v_a > 0.) != 0)) {
__pyx_t_1 = __pyx_v_a;
} else {
__pyx_t_1 = (-__pyx_v_a);
}
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "pysteps/motion/_vet.pyx":24
* """ Return the absolute value of a float """
*
* cdef inline intp int_min(intp a, intp b) nogil: return a if a < b else b # <<<<<<<<<<<<<<
*
* cdef inline intp int_max(intp a, intp b) nogil: return a if a > b else b
*/
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_min(__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_a, __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_b) {
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_r;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_1;
if (((__pyx_v_a < __pyx_v_b) != 0)) {
__pyx_t_1 = __pyx_v_a;
} else {
__pyx_t_1 = __pyx_v_b;
}
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "pysteps/motion/_vet.pyx":26
* cdef inline intp int_min(intp a, intp b) nogil: return a if a < b else b
*
* cdef inline intp int_max(intp a, intp b) nogil: return a if a > b else b # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_intp __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_a, __pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_b) {
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_r;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_1;
if (((__pyx_v_a > __pyx_v_b) != 0)) {
__pyx_t_1 = __pyx_v_a;
} else {
__pyx_t_1 = __pyx_v_b;
}
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
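/*
 * Editorial sketch (not emitted by Cython): the three inline helpers above
 * (float_abs, int_min and int_max) are the building blocks used to clamp a
 * coordinate into the valid index range [0, n - 1], which is what the warping
 * loop below does by hand at the image edges. `clamp_index` is a hypothetical
 * name introduced here for illustration only.
 */
static long clamp_index(long i, long n)
{
    long hi = (i < n - 1) ? i : n - 1;  /* int_min(i, n - 1) */
    return (hi > 0) ? hi : 0;           /* int_max(hi, 0)    */
}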
/* "pysteps/motion/_vet.pyx":29
*
* @cython.cdivision(True)
* cdef inline float64 _linear_interpolation(float64 x, # <<<<<<<<<<<<<<
* float64 x1,
* float64 x2,
*/
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y2) {
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r;
int __pyx_t_1;
/* "pysteps/motion/_vet.pyx":39
* """
*
* if float_abs(x1 - x2) < 1e-6: # <<<<<<<<<<<<<<
* return y1
*
*/
__pyx_t_1 = ((__pyx_f_7pysteps_6motion_4_vet_float_abs((__pyx_v_x1 - __pyx_v_x2)) < 1e-6) != 0);
if (__pyx_t_1) {
/* "pysteps/motion/_vet.pyx":40
*
* if float_abs(x1 - x2) < 1e-6:
* return y1 # <<<<<<<<<<<<<<
*
* return y1 + (x - x1) * (y2 - y1) / (x2 - x1)
*/
__pyx_r = __pyx_v_y1;
goto __pyx_L0;
/* "pysteps/motion/_vet.pyx":39
* """
*
* if float_abs(x1 - x2) < 1e-6: # <<<<<<<<<<<<<<
* return y1
*
*/
}
/* "pysteps/motion/_vet.pyx":42
* return y1
*
* return y1 + (x - x1) * (y2 - y1) / (x2 - x1) # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
__pyx_r = (__pyx_v_y1 + (((__pyx_v_x - __pyx_v_x1) * (__pyx_v_y2 - __pyx_v_y1)) / (__pyx_v_x2 - __pyx_v_x1)));
goto __pyx_L0;
/* "pysteps/motion/_vet.pyx":29
*
* @cython.cdivision(True)
* cdef inline float64 _linear_interpolation(float64 x, # <<<<<<<<<<<<<<
* float64 x1,
* float64 x2,
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
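/*
 * Editorial sketch (not emitted by Cython): _linear_interpolation in plain C.
 * When the interval degenerates (|x1 - x2| < 1e-6) it returns y1 instead of
 * dividing by (x2 - x1); otherwise it evaluates
 * y1 + (x - x1) * (y2 - y1) / (x2 - x1). For example, interpolating between
 * (x1, y1) = (0, 10) and (x2, y2) = (2, 20) at x = 0.5 gives
 * 10 + 0.5 * 10 / 2 = 12.5. `lerp_sketch` is a hypothetical name.
 */
static double lerp_sketch(double x, double x1, double x2, double y1, double y2)
{
    double d = x1 - x2;
    if ((d < 0. ? -d : d) < 1e-6)  /* degenerate interval: fall back to y1 */
        return y1;
    return y1 + (x - x1) * (y2 - y1) / (x2 - x1);
}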
/* "pysteps/motion/_vet.pyx":45
*
* @cython.cdivision(True)
* cdef inline float64 _bilinear_interpolation(float64 x, # <<<<<<<<<<<<<<
* float64 y,
* float64 x1,
*/
static CYTHON_INLINE __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_f_7pysteps_6motion_4_vet__bilinear_interpolation(__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y1, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y2, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q11, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q12, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q21, __pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_q22) {
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f_x_y1;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f_x_y2;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_r;
/* "pysteps/motion/_vet.pyx":59
* cdef float64 f_x_y1, f_x_y2
*
* f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21) # <<<<<<<<<<<<<<
* f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22)
* return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2)
*/
__pyx_v_f_x_y1 = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_x, __pyx_v_x1, __pyx_v_x2, __pyx_v_q11, __pyx_v_q21);
/* "pysteps/motion/_vet.pyx":60
*
* f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21)
* f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22) # <<<<<<<<<<<<<<
* return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2)
*
*/
__pyx_v_f_x_y2 = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_x, __pyx_v_x1, __pyx_v_x2, __pyx_v_q12, __pyx_v_q22);
/* "pysteps/motion/_vet.pyx":61
* f_x_y1 = _linear_interpolation(x, x1, x2, q11, q21)
* f_x_y2 = _linear_interpolation(x, x1, x2, q12, q22)
* return _linear_interpolation(y, y1, y2, f_x_y1, f_x_y2) # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
__pyx_r = __pyx_f_7pysteps_6motion_4_vet__linear_interpolation(__pyx_v_y, __pyx_v_y1, __pyx_v_y2, __pyx_v_f_x_y1, __pyx_v_f_x_y2);
goto __pyx_L0;
/* "pysteps/motion/_vet.pyx":45
*
* @cython.cdivision(True)
* cdef inline float64 _bilinear_interpolation(float64 x, # <<<<<<<<<<<<<<
* float64 y,
* float64 x1,
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
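/*
 * Editorial sketch (not emitted by Cython): _bilinear_interpolation above
 * reduces to two linear interpolations along x (one per row, at y = y1 and
 * y = y2) followed by a single interpolation along y. q11 and q21 are the
 * corner values at y = y1; q12 and q22 those at y = y2. `bilerp_sketch` is a
 * hypothetical name building on the `lerp_sketch` helper defined earlier.
 */
static double bilerp_sketch(double x, double y,
                            double x1, double x2, double y1, double y2,
                            double q11, double q12, double q21, double q22)
{
    double f_x_y1 = lerp_sketch(x, x1, x2, q11, q21); /* along x at y = y1 */
    double f_x_y2 = lerp_sketch(x, x1, x2, q12, q22); /* along x at y = y2 */
    return lerp_sketch(y, y1, y2, f_x_y1, f_x_y2);    /* along y */
}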
/* "pysteps/motion/_vet.pyx":67
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<<
* np.ndarray[int8, ndim=2] mask,
* np.ndarray[float64, ndim=3] displacement,
*/
/* Python wrapper */
static PyObject *__pyx_pw_7pysteps_6motion_4_vet_1_warp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7pysteps_6motion_4_vet__warp[] = "\n Morph an image by applying a displacement field (warping).\n\n The new image is created by selecting, for each position, the values of the\n input image at the positions given by the x and y displacements.\n The routine works in a backward sense: the displacement vectors have to\n refer to their destination.\n\n For more information on morphing functions, see Section 3 in\n `Beezley and Mandel (2008)`_.\n\n Beezley, J. D., & Mandel, J. (2008).\n Morphing ensemble Kalman filters. Tellus A, 60(1), 131-140.\n\n .. _`Beezley and Mandel (2008)`: http://dx.doi.org/10.1111/j.1600-0870.2007.00275.x\n\n The displacement field in the x and y directions and the image must have\n the same dimensions.\n\n The morphing is executed in parallel over the x axis.\n\n Displaced pixels that fall outside the limits take the value of the\n nearest edge. Those pixels are indicated by values greater than 1 in the\n output mask.\n\n Parameters\n ----------\n\n image : ndarray (ndim = 2)\n Image to morph.\n\n mask : ndarray (int8, ndim = 2)\n Input mask; values greater than 0 mark invalid pixels.\n\n displacement : ndarray (ndim = 3)\n Displacement field to be applied (warping).\n\n The dimensions are:\n displacement [ x (0) or y (1) ,\n i index of pixel, j index of pixel ]\n\n gradient : bool, optional\n If True, the gradient of the morphing function is returned.\n\n Returns\n -------\n\n image : ndarray (float64, ndim = 2)\n Morphed image.\n\n mask : ndarray (int8, ndim = 2)\n Invalid values mask. Points outside the boundaries are masked.\n Values greater than 1 indicate masked values.\n\n gradient_values : ndarray (float64, ndim = 3), optional\n If the gradient keyword is True, the gradient of the function is also\n returned.\n ";
static PyMethodDef __pyx_mdef_7pysteps_6motion_4_vet_1_warp = {"_warp", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7pysteps_6motion_4_vet_1_warp, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7pysteps_6motion_4_vet__warp};
static PyObject *__pyx_pw_7pysteps_6motion_4_vet_1_warp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_image = 0;
PyArrayObject *__pyx_v_mask = 0;
PyArrayObject *__pyx_v_displacement = 0;
int __pyx_v_gradient;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("_warp (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_image,&__pyx_n_s_mask,&__pyx_n_s_displacement,&__pyx_n_s_gradient,0};
PyObject* values[4] = {0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_image)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, 1); __PYX_ERR(0, 67, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_displacement)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, 2); __PYX_ERR(0, 67, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient);
if (value) { values[3] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_warp") < 0)) __PYX_ERR(0, 67, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_image = ((PyArrayObject *)values[0]);
__pyx_v_mask = ((PyArrayObject *)values[1]);
__pyx_v_displacement = ((PyArrayObject *)values[2]);
if (values[3]) {
__pyx_v_gradient = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_gradient == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error)
} else {
/* "pysteps/motion/_vet.pyx":70
* np.ndarray[int8, ndim=2] mask,
* np.ndarray[float64, ndim=3] displacement,
* bint gradient=False): # <<<<<<<<<<<<<<
* """
* Morph image by applying a displacement field (Warping).
*/
__pyx_v_gradient = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("_warp", 0, 3, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 67, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("pysteps.motion._vet._warp", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_image), __pyx_ptype_5numpy_ndarray, 1, "image", 0))) __PYX_ERR(0, 67, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 68, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_displacement), __pyx_ptype_5numpy_ndarray, 1, "displacement", 0))) __PYX_ERR(0, 69, __pyx_L1_error)
__pyx_r = __pyx_pf_7pysteps_6motion_4_vet__warp(__pyx_self, __pyx_v_image, __pyx_v_mask, __pyx_v_displacement, __pyx_v_gradient);
/* "pysteps/motion/_vet.pyx":67
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<<
* np.ndarray[int8, ndim=2] mask,
* np.ndarray[float64, ndim=3] displacement,
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
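/*
 * Editorial sketch (not emitted by Cython): the essence of the backward warp
 * that _warp implements, stripped of the NumPy buffer machinery and the mask
 * bookkeeping. For every output pixel (x, y) the source position is (x, y)
 * minus the displacement, clamped to the image, and the output value is the
 * bilinear sample of the input there. `warp_sketch` and its row-major memory
 * layout are assumptions of this sketch, not part of the generated module; it
 * reuses the hypothetical `clamp_index`, `lerp_sketch` and `bilerp_sketch`
 * helpers introduced above.
 */
static void warp_sketch(const double *image, const double *disp_x,
                        const double *disp_y, double *out, long nx, long ny)
{
    long x, y;
    for (x = 0; x < nx; x++) {
        for (y = 0; y < ny; y++) {
            /* backward mapping: subtract the displacement at (x, y) */
            double xf = (double)x - disp_x[x * ny + y];
            double yf = (double)y - disp_y[x * ny + y];
            long x0, x1, y0, y1;
            /* replicate the nearest edge outside the image limits */
            if (xf < 0.) xf = 0.;
            if (xf > (double)(nx - 1)) xf = (double)(nx - 1);
            if (yf < 0.) yf = 0.;
            if (yf > (double)(ny - 1)) yf = (double)(ny - 1);
            x0 = clamp_index((long)xf, nx);
            x1 = clamp_index(x0 + 1, nx);
            y0 = clamp_index((long)yf, ny);
            y1 = clamp_index(y0 + 1, ny);
            out[x * ny + y] = bilerp_sketch(xf, yf,
                                            (double)x0, (double)x1,
                                            (double)y0, (double)y1,
                                            image[x0 * ny + y0],  /* q11 */
                                            image[x0 * ny + y1],  /* q12 */
                                            image[x1 * ny + y0],  /* q21 */
                                            image[x1 * ny + y1]); /* q22 */
        }
    }
}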
static PyObject *__pyx_pf_7pysteps_6motion_4_vet__warp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_image, PyArrayObject *__pyx_v_mask, PyArrayObject *__pyx_v_displacement, int __pyx_v_gradient) {
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_nx;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_ny;
PyArrayObject *__pyx_v_new_image = 0;
PyArrayObject *__pyx_v_morphed_mask = 0;
PyArrayObject *__pyx_v_gradient_values = 0;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_max_int;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_max_int;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x_max_float;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y_max_float;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_x_float;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_y_float;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_dx;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_dy;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_floor;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_ceil;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_floor;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_ceil;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f00;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f10;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f01;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_f11;
__Pyx_LocalBuf_ND __pyx_pybuffernd_displacement;
__Pyx_Buffer __pyx_pybuffer_displacement;
__Pyx_LocalBuf_ND __pyx_pybuffernd_gradient_values;
__Pyx_Buffer __pyx_pybuffer_gradient_values;
__Pyx_LocalBuf_ND __pyx_pybuffernd_image;
__Pyx_Buffer __pyx_pybuffer_image;
__Pyx_LocalBuf_ND __pyx_pybuffernd_mask;
__Pyx_Buffer __pyx_pybuffer_mask;
__Pyx_LocalBuf_ND __pyx_pybuffernd_morphed_mask;
__Pyx_Buffer __pyx_pybuffer_morphed_mask;
__Pyx_LocalBuf_ND __pyx_pybuffernd_new_image;
__Pyx_Buffer __pyx_pybuffer_new_image;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyArrayObject *__pyx_t_6 = NULL;
PyArrayObject *__pyx_t_7 = NULL;
PyArrayObject *__pyx_t_8 = NULL;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_9;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_10;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_11;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_12;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_13;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
Py_ssize_t __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
int __pyx_t_21;
Py_ssize_t __pyx_t_22;
Py_ssize_t __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
Py_ssize_t __pyx_t_26;
Py_ssize_t __pyx_t_27;
Py_ssize_t __pyx_t_28;
Py_ssize_t __pyx_t_29;
Py_ssize_t __pyx_t_30;
Py_ssize_t __pyx_t_31;
Py_ssize_t __pyx_t_32;
Py_ssize_t __pyx_t_33;
Py_ssize_t __pyx_t_34;
Py_ssize_t __pyx_t_35;
Py_ssize_t __pyx_t_36;
Py_ssize_t __pyx_t_37;
Py_ssize_t __pyx_t_38;
Py_ssize_t __pyx_t_39;
Py_ssize_t __pyx_t_40;
Py_ssize_t __pyx_t_41;
Py_ssize_t __pyx_t_42;
Py_ssize_t __pyx_t_43;
Py_ssize_t __pyx_t_44;
Py_ssize_t __pyx_t_45;
Py_ssize_t __pyx_t_46;
Py_ssize_t __pyx_t_47;
Py_ssize_t __pyx_t_48;
Py_ssize_t __pyx_t_49;
Py_ssize_t __pyx_t_50;
Py_ssize_t __pyx_t_51;
Py_ssize_t __pyx_t_52;
Py_ssize_t __pyx_t_53;
Py_ssize_t __pyx_t_54;
Py_ssize_t __pyx_t_55;
Py_ssize_t __pyx_t_56;
Py_ssize_t __pyx_t_57;
Py_ssize_t __pyx_t_58;
Py_ssize_t __pyx_t_59;
Py_ssize_t __pyx_t_60;
Py_ssize_t __pyx_t_61;
Py_ssize_t __pyx_t_62;
Py_ssize_t __pyx_t_63;
Py_ssize_t __pyx_t_64;
Py_ssize_t __pyx_t_65;
Py_ssize_t __pyx_t_66;
Py_ssize_t __pyx_t_67;
Py_ssize_t __pyx_t_68;
Py_ssize_t __pyx_t_69;
Py_ssize_t __pyx_t_70;
Py_ssize_t __pyx_t_71;
Py_ssize_t __pyx_t_72;
Py_ssize_t __pyx_t_73;
Py_ssize_t __pyx_t_74;
Py_ssize_t __pyx_t_75;
__Pyx_RefNannySetupContext("_warp", 0);
__pyx_pybuffer_new_image.pybuffer.buf = NULL;
__pyx_pybuffer_new_image.refcount = 0;
__pyx_pybuffernd_new_image.data = NULL;
__pyx_pybuffernd_new_image.rcbuffer = &__pyx_pybuffer_new_image;
__pyx_pybuffer_morphed_mask.pybuffer.buf = NULL;
__pyx_pybuffer_morphed_mask.refcount = 0;
__pyx_pybuffernd_morphed_mask.data = NULL;
__pyx_pybuffernd_morphed_mask.rcbuffer = &__pyx_pybuffer_morphed_mask;
__pyx_pybuffer_gradient_values.pybuffer.buf = NULL;
__pyx_pybuffer_gradient_values.refcount = 0;
__pyx_pybuffernd_gradient_values.data = NULL;
__pyx_pybuffernd_gradient_values.rcbuffer = &__pyx_pybuffer_gradient_values;
__pyx_pybuffer_image.pybuffer.buf = NULL;
__pyx_pybuffer_image.refcount = 0;
__pyx_pybuffernd_image.data = NULL;
__pyx_pybuffernd_image.rcbuffer = &__pyx_pybuffer_image;
__pyx_pybuffer_mask.pybuffer.buf = NULL;
__pyx_pybuffer_mask.refcount = 0;
__pyx_pybuffernd_mask.data = NULL;
__pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask;
__pyx_pybuffer_displacement.pybuffer.buf = NULL;
__pyx_pybuffer_displacement.refcount = 0;
__pyx_pybuffernd_displacement.data = NULL;
__pyx_pybuffernd_displacement.rcbuffer = &__pyx_pybuffer_displacement;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error)
}
__pyx_pybuffernd_image.diminfo[0].strides = __pyx_pybuffernd_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_image.diminfo[0].shape = __pyx_pybuffernd_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_image.diminfo[1].strides = __pyx_pybuffernd_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_image.diminfo[1].shape = __pyx_pybuffernd_image.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error)
}
__pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask.diminfo[1].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask.diminfo[1].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_v_displacement, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 67, __pyx_L1_error)
}
__pyx_pybuffernd_displacement.diminfo[0].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_displacement.diminfo[0].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_displacement.diminfo[1].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_displacement.diminfo[1].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_displacement.diminfo[2].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_displacement.diminfo[2].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[2];
/* "pysteps/motion/_vet.pyx":130
* """
*
* cdef intp nx = <intp> image.shape[0] # <<<<<<<<<<<<<<
* cdef intp ny = <intp> image.shape[1]
*
*/
__pyx_v_nx = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_image->dimensions[0]));
/* "pysteps/motion/_vet.pyx":131
*
* cdef intp nx = <intp> image.shape[0]
* cdef intp ny = <intp> image.shape[1] # <<<<<<<<<<<<<<
*
* cdef np.ndarray[float64, ndim = 2] new_image = (
*/
__pyx_v_ny = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_image->dimensions[1]));
/* "pysteps/motion/_vet.pyx":134
*
* cdef np.ndarray[float64, ndim = 2] new_image = (
* np.zeros([nx, ny], dtype=np.float64)) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[int8, ndim = 2] morphed_mask = (
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 134, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 134, __pyx_L1_error)
__pyx_t_6 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_new_image = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_new_image.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 133, __pyx_L1_error)
} else {__pyx_pybuffernd_new_image.diminfo[0].strides = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_new_image.diminfo[0].shape = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_new_image.diminfo[1].strides = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_new_image.diminfo[1].shape = __pyx_pybuffernd_new_image.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_6 = 0;
__pyx_v_new_image = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "pysteps/motion/_vet.pyx":137
*
* cdef np.ndarray[int8, ndim = 2] morphed_mask = (
* np.zeros([nx, ny], dtype=np.int8)) # <<<<<<<<<<<<<<
*
* morphed_mask[mask > 0] = 1.0
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyList_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__pyx_t_5 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_int8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_1) < 0) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 137, __pyx_L1_error)
__pyx_t_7 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_morphed_mask = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 136, __pyx_L1_error)
} else {__pyx_pybuffernd_morphed_mask.diminfo[0].strides = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_mask.diminfo[0].shape = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_mask.diminfo[1].strides = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_mask.diminfo[1].shape = __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_7 = 0;
__pyx_v_morphed_mask = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "pysteps/motion/_vet.pyx":139
* np.zeros([nx, ny], dtype=np.int8))
*
* morphed_mask[mask > 0] = 1.0 # <<<<<<<<<<<<<<
*
* cdef np.ndarray[float64, ndim = 3] gradient_values = (
*/
__pyx_t_1 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error)
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morphed_mask), __pyx_t_1, __pyx_float_1_0) < 0)) __PYX_ERR(0, 139, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "pysteps/motion/_vet.pyx":142
*
* cdef np.ndarray[float64, ndim = 3] gradient_values = (
* np.zeros([2, nx, ny], dtype=np.float64)) # <<<<<<<<<<<<<<
*
* cdef intp x, y
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_nx); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_ny); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyList_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_2);
__Pyx_GIVEREF(__pyx_t_1);
PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyList_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 142, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 142, __pyx_L1_error)
__pyx_t_8 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) {
__pyx_v_gradient_values = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 141, __pyx_L1_error)
} else {__pyx_pybuffernd_gradient_values.diminfo[0].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_gradient_values.diminfo[0].shape = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_gradient_values.diminfo[1].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_gradient_values.diminfo[1].shape = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_gradient_values.diminfo[2].strides = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_gradient_values.diminfo[2].shape = __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.shape[2];
}
}
__pyx_t_8 = 0;
__pyx_v_gradient_values = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "pysteps/motion/_vet.pyx":146
* cdef intp x, y
*
* cdef intp x_max_int = nx - 1 # <<<<<<<<<<<<<<
* cdef intp y_max_int = ny - 1
*
*/
__pyx_v_x_max_int = (__pyx_v_nx - 1);
/* "pysteps/motion/_vet.pyx":147
*
* cdef intp x_max_int = nx - 1
* cdef intp y_max_int = ny - 1 # <<<<<<<<<<<<<<
*
* cdef float64 x_max_float = <float64> x_max_int
*/
__pyx_v_y_max_int = (__pyx_v_ny - 1);
/* "pysteps/motion/_vet.pyx":149
* cdef intp y_max_int = ny - 1
*
* cdef float64 x_max_float = <float64> x_max_int # <<<<<<<<<<<<<<
* cdef float64 y_max_float = <float64> y_max_int
*
*/
__pyx_v_x_max_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x_max_int);
/* "pysteps/motion/_vet.pyx":150
*
* cdef float64 x_max_float = <float64> x_max_int
* cdef float64 y_max_float = <float64> y_max_int # <<<<<<<<<<<<<<
*
* cdef float64 x_float, y_float, dx, dy
*/
__pyx_v_y_max_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y_max_int);
/* "pysteps/motion/_vet.pyx":161
* cdef float64 f00, f10, f01, f11
*
* for x in prange(nx, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<<
*
* for y in range(ny):
*/
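/*
 * Editorial note: Cython lowers prange(nx, schedule='dynamic', nogil=True)
 * to the block that follows. The GIL is released (Py_UNBLOCK_THREADS), the
 * loop becomes an `#pragma omp for ... schedule(dynamic)` inside an
 * `#pragma omp parallel` region, and every variable assigned in the loop body
 * is listed as lastprivate so its final value survives the loop. A
 * hand-written equivalent is, roughly:
 *
 *     #pragma omp parallel for schedule(dynamic)
 *     for (x = 0; x < nx; x++) { ... warp one row ... }
 */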
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_9 = __pyx_v_nx;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_11 = (__pyx_t_9 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_11 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_44, __pyx_t_45, __pyx_t_46, __pyx_t_47, __pyx_t_48, __pyx_t_49, __pyx_t_50, __pyx_t_51, __pyx_t_52, __pyx_t_53, __pyx_t_54, __pyx_t_55, __pyx_t_56, __pyx_t_57, __pyx_t_58, __pyx_t_59, __pyx_t_60, __pyx_t_61, __pyx_t_62, __pyx_t_63, __pyx_t_64, __pyx_t_65, __pyx_t_66, __pyx_t_67, __pyx_t_68, __pyx_t_69, __pyx_t_70, __pyx_t_71, __pyx_t_72, __pyx_t_73, __pyx_t_74, __pyx_t_75)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_dx) lastprivate(__pyx_v_dy) lastprivate(__pyx_v_f00) lastprivate(__pyx_v_f01) lastprivate(__pyx_v_f10) lastprivate(__pyx_v_f11) firstprivate(__pyx_v_x) lastprivate(__pyx_v_x) lastprivate(__pyx_v_x_ceil) lastprivate(__pyx_v_x_float) lastprivate(__pyx_v_x_floor) lastprivate(__pyx_v_y) lastprivate(__pyx_v_y_ceil) lastprivate(__pyx_v_y_float) lastprivate(__pyx_v_y_floor) schedule(dynamic)
#endif /* _OPENMP */
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_11; __pyx_t_10++){
{
__pyx_v_x = (__pyx_t_7pysteps_6motion_4_vet_intp)(0 + 1 * __pyx_t_10);
/* Initialize private variables to invalid values */
__pyx_v_dx = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_dy = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_f00 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_f01 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_f10 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_f11 = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_x_ceil = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_x_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_x_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_y = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_y_ceil = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_y_float = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_y_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
/* "pysteps/motion/_vet.pyx":163
* for x in prange(nx, schedule='dynamic', nogil=True):
*
* for y in range(ny): # <<<<<<<<<<<<<<
*
* x_float = (<float64> x) - displacement[0, x, y]
*/
__pyx_t_12 = __pyx_v_ny;
__pyx_t_13 = __pyx_t_12;
for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) {
__pyx_v_y = __pyx_t_14;
/* "pysteps/motion/_vet.pyx":165
* for y in range(ny):
*
* x_float = (<float64> x) - displacement[0, x, y] # <<<<<<<<<<<<<<
* y_float = (<float64> y) - displacement[1, x, y]
*
*/
__pyx_t_15 = 0;
__pyx_t_16 = __pyx_v_x;
__pyx_t_17 = __pyx_v_y;
__pyx_v_x_float = (((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_17, __pyx_pybuffernd_displacement.diminfo[2].strides)));
/* "pysteps/motion/_vet.pyx":166
*
* x_float = (<float64> x) - displacement[0, x, y]
* y_float = (<float64> y) - displacement[1, x, y] # <<<<<<<<<<<<<<
*
* if x_float < 0:
*/
__pyx_t_18 = 1;
__pyx_t_19 = __pyx_v_x;
__pyx_t_20 = __pyx_v_y;
__pyx_v_y_float = (((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_20, __pyx_pybuffernd_displacement.diminfo[2].strides)));
/* "pysteps/motion/_vet.pyx":168
* y_float = (<float64> y) - displacement[1, x, y]
*
* if x_float < 0: # <<<<<<<<<<<<<<
* morphed_mask[x, y] = 1
* x_float = 0
*/
__pyx_t_21 = ((__pyx_v_x_float < 0.0) != 0);
if (__pyx_t_21) {
/* "pysteps/motion/_vet.pyx":169
*
* if x_float < 0:
* morphed_mask[x, y] = 1 # <<<<<<<<<<<<<<
* x_float = 0
* x_floor = 0
*/
__pyx_t_22 = __pyx_v_x;
__pyx_t_23 = __pyx_v_y;
*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_23, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1;
/* "pysteps/motion/_vet.pyx":170
* if x_float < 0:
* morphed_mask[x, y] = 1
* x_float = 0 # <<<<<<<<<<<<<<
* x_floor = 0
* x_ceil = 0
*/
__pyx_v_x_float = 0.0;
/* "pysteps/motion/_vet.pyx":171
* morphed_mask[x, y] = 1
* x_float = 0
* x_floor = 0 # <<<<<<<<<<<<<<
* x_ceil = 0
*
*/
__pyx_v_x_floor = 0;
/* "pysteps/motion/_vet.pyx":172
* x_float = 0
* x_floor = 0
* x_ceil = 0 # <<<<<<<<<<<<<<
*
* elif x_float > x_max_float:
*/
__pyx_v_x_ceil = 0;
/* "pysteps/motion/_vet.pyx":168
* y_float = (<float64> y) - displacement[1, x, y]
*
* if x_float < 0: # <<<<<<<<<<<<<<
* morphed_mask[x, y] = 1
* x_float = 0
*/
goto __pyx_L12;
}
/* "pysteps/motion/_vet.pyx":174
* x_ceil = 0
*
* elif x_float > x_max_float: # <<<<<<<<<<<<<<
* morphed_mask[x, y] = 1
* x_float = x_max_float
*/
__pyx_t_21 = ((__pyx_v_x_float > __pyx_v_x_max_float) != 0);
if (__pyx_t_21) {
/* "pysteps/motion/_vet.pyx":175
*
* elif x_float > x_max_float:
* morphed_mask[x, y] = 1 # <<<<<<<<<<<<<<
* x_float = x_max_float
* x_floor = x_max_int
*/
__pyx_t_24 = __pyx_v_x;
__pyx_t_25 = __pyx_v_y;
*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_24, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_25, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1;
/* "pysteps/motion/_vet.pyx":176
* elif x_float > x_max_float:
* morphed_mask[x, y] = 1
* x_float = x_max_float # <<<<<<<<<<<<<<
* x_floor = x_max_int
* x_ceil = x_max_int
*/
__pyx_v_x_float = __pyx_v_x_max_float;
/* "pysteps/motion/_vet.pyx":177
* morphed_mask[x, y] = 1
* x_float = x_max_float
* x_floor = x_max_int # <<<<<<<<<<<<<<
* x_ceil = x_max_int
*
*/
__pyx_v_x_floor = __pyx_v_x_max_int;
/* "pysteps/motion/_vet.pyx":178
* x_float = x_max_float
* x_floor = x_max_int
* x_ceil = x_max_int # <<<<<<<<<<<<<<
*
* else:
*/
__pyx_v_x_ceil = __pyx_v_x_max_int;
/* "pysteps/motion/_vet.pyx":174
* x_ceil = 0
*
* elif x_float > x_max_float: # <<<<<<<<<<<<<<
* morphed_mask[x, y] = 1
* x_float = x_max_float
*/
goto __pyx_L12;
}
/* "pysteps/motion/_vet.pyx":181
*
* else:
* x_floor = <intp> floor(x_float) # <<<<<<<<<<<<<<
* x_ceil = x_floor + 1
* if x_ceil > x_max_int:
*/
/*else*/ {
__pyx_v_x_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)floor(__pyx_v_x_float));
/* "pysteps/motion/_vet.pyx":182
* else:
* x_floor = <intp> floor(x_float)
* x_ceil = x_floor + 1 # <<<<<<<<<<<<<<
* if x_ceil > x_max_int:
* x_ceil = x_max_int
*/
__pyx_v_x_ceil = (__pyx_v_x_floor + 1);
/* "pysteps/motion/_vet.pyx":183
* x_floor = <intp> floor(x_float)
* x_ceil = x_floor + 1
* if x_ceil > x_max_int: # <<<<<<<<<<<<<<
* x_ceil = x_max_int
*
*/
__pyx_t_21 = ((__pyx_v_x_ceil > __pyx_v_x_max_int) != 0);
if (__pyx_t_21) {
/* "pysteps/motion/_vet.pyx":184
* x_ceil = x_floor + 1
* if x_ceil > x_max_int:
* x_ceil = x_max_int # <<<<<<<<<<<<<<
*
* if y_float < 0:
*/
__pyx_v_x_ceil = __pyx_v_x_max_int;
/* "pysteps/motion/_vet.pyx":183
* x_floor = <intp> floor(x_float)
* x_ceil = x_floor + 1
* if x_ceil > x_max_int: # <<<<<<<<<<<<<<
* x_ceil = x_max_int
*
*/
}
}
__pyx_L12:;
/* "pysteps/motion/_vet.pyx":186
* x_ceil = x_max_int
*
* if y_float < 0: # <<<<<<<<<<<<<<
* morphed_mask[x, y] = 1
* y_float = 0
*/
__pyx_t_21 = ((__pyx_v_y_float < 0.0) != 0);
if (__pyx_t_21) {
/* "pysteps/motion/_vet.pyx":187
*
* if y_float < 0:
* morphed_mask[x, y] = 1 # <<<<<<<<<<<<<<
* y_float = 0
* y_floor = 0
*/
__pyx_t_26 = __pyx_v_x;
__pyx_t_27 = __pyx_v_y;
*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_27, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1;
/* "pysteps/motion/_vet.pyx":188
* if y_float < 0:
* morphed_mask[x, y] = 1
* y_float = 0 # <<<<<<<<<<<<<<
* y_floor = 0
* y_ceil = 0
*/
__pyx_v_y_float = 0.0;
/* "pysteps/motion/_vet.pyx":189
* morphed_mask[x, y] = 1
* y_float = 0
* y_floor = 0 # <<<<<<<<<<<<<<
* y_ceil = 0
* elif y_float > y_max_float:
*/
__pyx_v_y_floor = 0;
/* "pysteps/motion/_vet.pyx":190
* y_float = 0
* y_floor = 0
* y_ceil = 0 # <<<<<<<<<<<<<<
* elif y_float > y_max_float:
* morphed_mask[x, y] = 1
*/
__pyx_v_y_ceil = 0;
/* "pysteps/motion/_vet.pyx":186
* x_ceil = x_max_int
*
* if y_float < 0: # <<<<<<<<<<<<<<
* morphed_mask[x, y] = 1
* y_float = 0
*/
goto __pyx_L14;
}
/* "pysteps/motion/_vet.pyx":191
* y_floor = 0
* y_ceil = 0
* elif y_float > y_max_float: # <<<<<<<<<<<<<<
* morphed_mask[x, y] = 1
* y_float = y_max_float
*/
__pyx_t_21 = ((__pyx_v_y_float > __pyx_v_y_max_float) != 0);
if (__pyx_t_21) {
/* "pysteps/motion/_vet.pyx":192
* y_ceil = 0
* elif y_float > y_max_float:
* morphed_mask[x, y] = 1 # <<<<<<<<<<<<<<
* y_float = y_max_float
* y_floor = y_max_int
*/
__pyx_t_28 = __pyx_v_x;
__pyx_t_29 = __pyx_v_y;
*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_28, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_29, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = 1;
/* "pysteps/motion/_vet.pyx":193
* elif y_float > y_max_float:
* morphed_mask[x, y] = 1
* y_float = y_max_float # <<<<<<<<<<<<<<
* y_floor = y_max_int
* y_ceil = y_max_int
*/
__pyx_v_y_float = __pyx_v_y_max_float;
/* "pysteps/motion/_vet.pyx":194
* morphed_mask[x, y] = 1
* y_float = y_max_float
* y_floor = y_max_int # <<<<<<<<<<<<<<
* y_ceil = y_max_int
* else:
*/
__pyx_v_y_floor = __pyx_v_y_max_int;
/* "pysteps/motion/_vet.pyx":195
* y_float = y_max_float
* y_floor = y_max_int
* y_ceil = y_max_int # <<<<<<<<<<<<<<
* else:
* y_floor = <intp> floor(y_float)
*/
__pyx_v_y_ceil = __pyx_v_y_max_int;
/* "pysteps/motion/_vet.pyx":191
* y_floor = 0
* y_ceil = 0
* elif y_float > y_max_float: # <<<<<<<<<<<<<<
* morphed_mask[x, y] = 1
* y_float = y_max_float
*/
goto __pyx_L14;
}
/* "pysteps/motion/_vet.pyx":197
* y_ceil = y_max_int
* else:
* y_floor = <intp> floor(y_float) # <<<<<<<<<<<<<<
* y_ceil = y_floor + 1
* if y_ceil > y_max_int:
*/
/*else*/ {
__pyx_v_y_floor = ((__pyx_t_7pysteps_6motion_4_vet_intp)floor(__pyx_v_y_float));
/* "pysteps/motion/_vet.pyx":198
* else:
* y_floor = <intp> floor(y_float)
* y_ceil = y_floor + 1 # <<<<<<<<<<<<<<
* if y_ceil > y_max_int:
* y_ceil = y_max_int
*/
__pyx_v_y_ceil = (__pyx_v_y_floor + 1);
/* "pysteps/motion/_vet.pyx":199
* y_floor = <intp> floor(y_float)
* y_ceil = y_floor + 1
* if y_ceil > y_max_int: # <<<<<<<<<<<<<<
* y_ceil = y_max_int
*
*/
__pyx_t_21 = ((__pyx_v_y_ceil > __pyx_v_y_max_int) != 0);
if (__pyx_t_21) {
/* "pysteps/motion/_vet.pyx":200
* y_ceil = y_floor + 1
* if y_ceil > y_max_int:
* y_ceil = y_max_int # <<<<<<<<<<<<<<
*
* dx = x_float - <float64> x_floor
*/
__pyx_v_y_ceil = __pyx_v_y_max_int;
/* "pysteps/motion/_vet.pyx":199
* y_floor = <intp> floor(y_float)
* y_ceil = y_floor + 1
* if y_ceil > y_max_int: # <<<<<<<<<<<<<<
* y_ceil = y_max_int
*
*/
}
}
__pyx_L14:;
/* "pysteps/motion/_vet.pyx":202
* y_ceil = y_max_int
*
* dx = x_float - <float64> x_floor # <<<<<<<<<<<<<<
* dy = y_float - <float64> y_floor
*
*/
__pyx_v_dx = (__pyx_v_x_float - ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_x_floor));
/* "pysteps/motion/_vet.pyx":203
*
* dx = x_float - <float64> x_floor
* dy = y_float - <float64> y_floor # <<<<<<<<<<<<<<
*
 * # This assumes that the spacing between grid points is 1.
*/
__pyx_v_dy = (__pyx_v_y_float - ((__pyx_t_7pysteps_6motion_4_vet_float64)__pyx_v_y_floor));
/* "pysteps/motion/_vet.pyx":208
*
 * # Bilinear interpolation coefficients
* f00 = image[x_floor, y_floor] # <<<<<<<<<<<<<<
* f10 = image[x_ceil, y_floor] - image[x_floor, y_floor]
* f01 = image[x_floor, y_ceil] - image[x_floor, y_floor]
*/
__pyx_t_30 = __pyx_v_x_floor;
__pyx_t_31 = __pyx_v_y_floor;
__pyx_v_f00 = (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_31, __pyx_pybuffernd_image.diminfo[1].strides));
/* "pysteps/motion/_vet.pyx":209
 * # Bilinear interpolation coefficients
* f00 = image[x_floor, y_floor]
* f10 = image[x_ceil, y_floor] - image[x_floor, y_floor] # <<<<<<<<<<<<<<
* f01 = image[x_floor, y_ceil] - image[x_floor, y_floor]
* f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor]
*/
__pyx_t_32 = __pyx_v_x_ceil;
__pyx_t_33 = __pyx_v_y_floor;
__pyx_t_34 = __pyx_v_x_floor;
__pyx_t_35 = __pyx_v_y_floor;
__pyx_v_f10 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_32, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_33, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_35, __pyx_pybuffernd_image.diminfo[1].strides)));
/* "pysteps/motion/_vet.pyx":210
* f00 = image[x_floor, y_floor]
* f10 = image[x_ceil, y_floor] - image[x_floor, y_floor]
* f01 = image[x_floor, y_ceil] - image[x_floor, y_floor] # <<<<<<<<<<<<<<
* f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor]
* - image[x_floor, y_ceil] + image[x_ceil, y_ceil])
*/
__pyx_t_36 = __pyx_v_x_floor;
__pyx_t_37 = __pyx_v_y_ceil;
__pyx_t_38 = __pyx_v_x_floor;
__pyx_t_39 = __pyx_v_y_floor;
__pyx_v_f01 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_37, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_38, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_39, __pyx_pybuffernd_image.diminfo[1].strides)));
/* "pysteps/motion/_vet.pyx":211
* f10 = image[x_ceil, y_floor] - image[x_floor, y_floor]
* f01 = image[x_floor, y_ceil] - image[x_floor, y_floor]
* f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor] # <<<<<<<<<<<<<<
* - image[x_floor, y_ceil] + image[x_ceil, y_ceil])
*
*/
__pyx_t_40 = __pyx_v_x_floor;
__pyx_t_41 = __pyx_v_y_floor;
__pyx_t_42 = __pyx_v_x_ceil;
__pyx_t_43 = __pyx_v_y_floor;
/* "pysteps/motion/_vet.pyx":212
* f01 = image[x_floor, y_ceil] - image[x_floor, y_floor]
* f11 = (image[x_floor, y_floor] - image[x_ceil, y_floor]
* - image[x_floor, y_ceil] + image[x_ceil, y_ceil]) # <<<<<<<<<<<<<<
*
* # Bilinear interpolation
*/
__pyx_t_44 = __pyx_v_x_floor;
__pyx_t_45 = __pyx_v_y_ceil;
__pyx_t_46 = __pyx_v_x_ceil;
__pyx_t_47 = __pyx_v_y_ceil;
__pyx_v_f11 = ((((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_41, __pyx_pybuffernd_image.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_42, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_43, __pyx_pybuffernd_image.diminfo[1].strides))) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_44, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_45, __pyx_pybuffernd_image.diminfo[1].strides))) + (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_image.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_image.diminfo[0].strides, __pyx_t_47, __pyx_pybuffernd_image.diminfo[1].strides)));
/* "pysteps/motion/_vet.pyx":215
*
* # Bilinear interpolation
* new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11 # <<<<<<<<<<<<<<
*
* if gradient:
*/
__pyx_t_48 = __pyx_v_x;
__pyx_t_49 = __pyx_v_y;
*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_new_image.rcbuffer->pybuffer.buf, __pyx_t_48, __pyx_pybuffernd_new_image.diminfo[0].strides, __pyx_t_49, __pyx_pybuffernd_new_image.diminfo[1].strides) = (((__pyx_v_f00 + (__pyx_v_dx * __pyx_v_f10)) + (__pyx_v_dy * __pyx_v_f01)) + ((__pyx_v_dx * __pyx_v_dy) * __pyx_v_f11));
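 /* Note: f00, f10, f01 and f11 computed above are the coefficients of the
  * bilinear form f(dx, dy) = f00 + dx*f10 + dy*f01 + dx*dy*f11, with
  * f10 = f(1,0) - f(0,0), f01 = f(0,1) - f(0,0) and
  * f11 = f(0,0) - f(1,0) - f(0,1) + f(1,1), which reproduces the four corner
  * samples exactly at dx, dy in {0, 1}. A self-contained sketch over a
  * row-major array (an illustrative assumption, not the generated code):
  *
  *     static double bilinear(const double *img, long ny,
  *                            long x0, long x1, long y0, long y1,
  *                            double dx, double dy)
  *     {
  *         double f00 = img[x0 * ny + y0];
  *         double f10 = img[x1 * ny + y0] - f00;
  *         double f01 = img[x0 * ny + y1] - f00;
  *         double f11 = f00 - img[x1 * ny + y0] - img[x0 * ny + y1]
  *                          + img[x1 * ny + y1];
  *         return f00 + dx * f10 + dy * f01 + dx * dy * f11;
  *     }
  */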
/* "pysteps/motion/_vet.pyx":217
* new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11
*
* if gradient: # <<<<<<<<<<<<<<
* gradient_values[0, x, y] = f10 + dy * f11
* gradient_values[1, x, y] = f01 + dx * f11
*/
__pyx_t_21 = (__pyx_v_gradient != 0);
if (__pyx_t_21) {
/* "pysteps/motion/_vet.pyx":218
*
* if gradient:
* gradient_values[0, x, y] = f10 + dy * f11 # <<<<<<<<<<<<<<
* gradient_values[1, x, y] = f01 + dx * f11
*
*/
__pyx_t_50 = 0;
__pyx_t_51 = __pyx_v_x;
__pyx_t_52 = __pyx_v_y;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf, __pyx_t_50, __pyx_pybuffernd_gradient_values.diminfo[0].strides, __pyx_t_51, __pyx_pybuffernd_gradient_values.diminfo[1].strides, __pyx_t_52, __pyx_pybuffernd_gradient_values.diminfo[2].strides) = (__pyx_v_f10 + (__pyx_v_dy * __pyx_v_f11));
/* "pysteps/motion/_vet.pyx":219
* if gradient:
* gradient_values[0, x, y] = f10 + dy * f11
* gradient_values[1, x, y] = f01 + dx * f11 # <<<<<<<<<<<<<<
*
* f00 = mask[x_floor, y_floor]
*/
__pyx_t_53 = 1;
__pyx_t_54 = __pyx_v_x;
__pyx_t_55 = __pyx_v_y;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_gradient_values.rcbuffer->pybuffer.buf, __pyx_t_53, __pyx_pybuffernd_gradient_values.diminfo[0].strides, __pyx_t_54, __pyx_pybuffernd_gradient_values.diminfo[1].strides, __pyx_t_55, __pyx_pybuffernd_gradient_values.diminfo[2].strides) = (__pyx_v_f01 + (__pyx_v_dx * __pyx_v_f11));
/* "pysteps/motion/_vet.pyx":217
* new_image[x, y] = f00 + dx * f10 + dy * f01 + dx * dy * f11
*
* if gradient: # <<<<<<<<<<<<<<
* gradient_values[0, x, y] = f10 + dy * f11
* gradient_values[1, x, y] = f01 + dx * f11
*/
}
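 /* Note: the two stores in the gradient branch above are the exact partial
  * derivatives of the bilinear form with respect to the fractional offsets:
  *
  *     d/d(dx) [f00 + dx*f10 + dy*f01 + dx*dy*f11] = f10 + dy*f11
  *     d/d(dy) [f00 + dx*f10 + dy*f01 + dx*dy*f11] = f01 + dx*f11
  *
  * Since the grid spacing is assumed to be 1 (see the .pyx comment quoted
  * near line 205 above), these are also the spatial derivatives of new_image
  * at the warped location. */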
/* "pysteps/motion/_vet.pyx":221
* gradient_values[1, x, y] = f01 + dx * f11
*
* f00 = mask[x_floor, y_floor] # <<<<<<<<<<<<<<
* f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor]
* f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor]
*/
__pyx_t_56 = __pyx_v_x_floor;
__pyx_t_57 = __pyx_v_y_floor;
__pyx_v_f00 = (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_56, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_57, __pyx_pybuffernd_mask.diminfo[1].strides));
/* "pysteps/motion/_vet.pyx":222
*
* f00 = mask[x_floor, y_floor]
* f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor] # <<<<<<<<<<<<<<
* f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor]
* f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor]
*/
__pyx_t_58 = __pyx_v_x_ceil;
__pyx_t_59 = __pyx_v_y_floor;
__pyx_t_60 = __pyx_v_x_floor;
__pyx_t_61 = __pyx_v_y_floor;
__pyx_v_f10 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_59, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_61, __pyx_pybuffernd_mask.diminfo[1].strides)));
/* "pysteps/motion/_vet.pyx":223
* f00 = mask[x_floor, y_floor]
* f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor]
* f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor] # <<<<<<<<<<<<<<
* f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor]
* - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil])
*/
__pyx_t_62 = __pyx_v_x_floor;
__pyx_t_63 = __pyx_v_y_ceil;
__pyx_t_64 = __pyx_v_x_floor;
__pyx_t_65 = __pyx_v_y_floor;
__pyx_v_f01 = ((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_62, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_63, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_64, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_65, __pyx_pybuffernd_mask.diminfo[1].strides)));
/* "pysteps/motion/_vet.pyx":224
* f10 = mask[x_ceil, y_floor] - mask[x_floor, y_floor]
* f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor]
* f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor] # <<<<<<<<<<<<<<
* - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil])
*
*/
__pyx_t_66 = __pyx_v_x_floor;
__pyx_t_67 = __pyx_v_y_floor;
__pyx_t_68 = __pyx_v_x_ceil;
__pyx_t_69 = __pyx_v_y_floor;
/* "pysteps/motion/_vet.pyx":225
* f01 = mask[x_floor, y_ceil] - mask[x_floor, y_floor]
* f11 = (mask[x_floor, y_floor] - mask[x_ceil, y_floor]
* - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil]) # <<<<<<<<<<<<<<
*
* morphed_mask[x, y] = <int8> (f00 + dx * f10 + dy * f01
*/
__pyx_t_70 = __pyx_v_x_floor;
__pyx_t_71 = __pyx_v_y_ceil;
__pyx_t_72 = __pyx_v_x_ceil;
__pyx_t_73 = __pyx_v_y_ceil;
__pyx_v_f11 = ((((*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_66, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_67, __pyx_pybuffernd_mask.diminfo[1].strides)) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_68, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_69, __pyx_pybuffernd_mask.diminfo[1].strides))) - (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_70, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_71, __pyx_pybuffernd_mask.diminfo[1].strides))) + (*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_mask.rcbuffer->pybuffer.buf, __pyx_t_72, __pyx_pybuffernd_mask.diminfo[0].strides, __pyx_t_73, __pyx_pybuffernd_mask.diminfo[1].strides)));
/* "pysteps/motion/_vet.pyx":227
* - mask[x_floor, y_ceil] + mask[x_ceil, y_ceil])
*
* morphed_mask[x, y] = <int8> (f00 + dx * f10 + dy * f01 # <<<<<<<<<<<<<<
* + dx * dy * f11)
*
*/
__pyx_t_74 = __pyx_v_x;
__pyx_t_75 = __pyx_v_y;
*__Pyx_BufPtrStrided2d(__pyx_t_7pysteps_6motion_4_vet_int8 *, __pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer.buf, __pyx_t_74, __pyx_pybuffernd_morphed_mask.diminfo[0].strides, __pyx_t_75, __pyx_pybuffernd_morphed_mask.diminfo[1].strides) = ((__pyx_t_7pysteps_6motion_4_vet_int8)(((__pyx_v_f00 + (__pyx_v_dx * __pyx_v_f10)) + (__pyx_v_dy * __pyx_v_f01)) + ((__pyx_v_dx * __pyx_v_dy) * __pyx_v_f11)));
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
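/* Note: likely()/unlikely() are branch-prediction hints that expand to
 * __builtin_expect on GCC-compatible compilers. A minimal sketch of the
 * usual definition and its use:
 *
 *     #define likely(x)   __builtin_expect(!!(x), 1)
 *     #define unlikely(x) __builtin_expect(!!(x), 0)
 *
 *     if (unlikely(ptr == NULL))  // tells the optimizer the error path is cold
 *         return -1;
 *
 * The conditional block above re-installs these macros after the parallel
 * section, where they were presumably undefined for compiler compatibility. */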
}
/* "pysteps/motion/_vet.pyx":161
* cdef float64 f00, f10, f01, f11
*
* for x in prange(nx, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<<
*
* for y in range(ny):
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "pysteps/motion/_vet.pyx":230
* + dx * dy * f11)
*
* morphed_mask[morphed_mask != 0] = 1 # <<<<<<<<<<<<<<
* if gradient:
* return new_image, morphed_mask, gradient_values
*/
__pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_morphed_mask), __pyx_int_0, Py_NE); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 230, __pyx_L1_error)
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morphed_mask), __pyx_t_5, __pyx_int_1) < 0)) __PYX_ERR(0, 230, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
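 /* Note: the three C-API calls above are the lowering of the NumPy
  * boolean-mask assignment morphed_mask[morphed_mask != 0] = 1. This must
  * run with the GIL held, which is why it follows the parallel region rather
  * than sitting inside it: PyObject_RichCompare builds the boolean index
  * array and PyObject_SetItem performs the masked store. A minimal
  * hand-written equivalent (zero and one stand for cached int objects such
  * as __pyx_int_0/__pyx_int_1):
  *
  *     PyObject *nz = PyObject_RichCompare(arr, zero, Py_NE);  // arr != 0
  *     if (nz == NULL) return NULL;
  *     if (PyObject_SetItem(arr, nz, one) < 0) { Py_DECREF(nz); return NULL; }
  *     Py_DECREF(nz);
  */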
/* "pysteps/motion/_vet.pyx":231
*
* morphed_mask[morphed_mask != 0] = 1
* if gradient: # <<<<<<<<<<<<<<
* return new_image, morphed_mask, gradient_values
* else:
*/
__pyx_t_21 = (__pyx_v_gradient != 0);
if (__pyx_t_21) {
/* "pysteps/motion/_vet.pyx":232
* morphed_mask[morphed_mask != 0] = 1
* if gradient:
* return new_image, morphed_mask, gradient_values # <<<<<<<<<<<<<<
* else:
* return new_image, morphed_mask
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 232, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_new_image));
__Pyx_GIVEREF(((PyObject *)__pyx_v_new_image));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_new_image));
__Pyx_INCREF(((PyObject *)__pyx_v_morphed_mask));
__Pyx_GIVEREF(((PyObject *)__pyx_v_morphed_mask));
PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_morphed_mask));
__Pyx_INCREF(((PyObject *)__pyx_v_gradient_values));
__Pyx_GIVEREF(((PyObject *)__pyx_v_gradient_values));
PyTuple_SET_ITEM(__pyx_t_5, 2, ((PyObject *)__pyx_v_gradient_values));
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "pysteps/motion/_vet.pyx":231
*
* morphed_mask[morphed_mask != 0] = 1
* if gradient: # <<<<<<<<<<<<<<
* return new_image, morphed_mask, gradient_values
* else:
*/
}
/* "pysteps/motion/_vet.pyx":234
* return new_image, morphed_mask, gradient_values
* else:
* return new_image, morphed_mask # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(((PyObject *)__pyx_v_new_image));
__Pyx_GIVEREF(((PyObject *)__pyx_v_new_image));
PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_v_new_image));
__Pyx_INCREF(((PyObject *)__pyx_v_morphed_mask));
__Pyx_GIVEREF(((PyObject *)__pyx_v_morphed_mask));
PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_morphed_mask));
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
}
/* "pysteps/motion/_vet.pyx":67
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<<
* np.ndarray[int8, ndim=2] mask,
* np.ndarray[float64, ndim=3] displacement,
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_image.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("pysteps.motion._vet._warp", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_gradient_values.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_image.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_mask.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_new_image.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_new_image);
__Pyx_XDECREF((PyObject *)__pyx_v_morphed_mask);
__Pyx_XDECREF((PyObject *)__pyx_v_gradient_values);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
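/* Note: the prange(nx, schedule='dynamic', nogil=True) loop compiled above
 * corresponds, conceptually, to releasing the GIL and running a dynamically
 * scheduled OpenMP parallel for. A minimal free-standing sketch of that
 * shape, guarded behind a hypothetical PYSTEPS_EDITORIAL_SKETCH macro so it
 * does not affect the build (illustrative only, not the generated code): */
#ifdef PYSTEPS_EDITORIAL_SKETCH
static void warp_rows_sketch(long nx, long ny,
                             const double *image, double *new_image)
{
    long x, y;
    /* The GIL must already be released by the caller, as Cython's nogil
       block does around the generated loop. */
    #pragma omp parallel for schedule(dynamic) private(y)
    for (x = 0; x < nx; x++) {
        for (y = 0; y < ny; y++) {
            /* stand-in for the per-pixel work above: clamp, interpolate, store */
            new_image[x * ny + y] = image[x * ny + y];
        }
    }
}
#endif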
/* "pysteps/motion/_vet.pyx":240
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<<
* np.ndarray[float64, ndim=2] template_image,
* np.ndarray[float64, ndim=2] input_image,
*/
/* Python wrapper */
static PyObject *__pyx_pw_7pysteps_6motion_4_vet_3_cost_function(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_7pysteps_6motion_4_vet_2_cost_function[] = "\n Variational Echo Tracking Cost function.\n \n This function computes the Variational Echo Tracking (VET) \n cost function presented by `Laroche and Zawadzki (1995)`_ and used in the \n McGill Algorithm for Prediction by Lagrangian Extrapolation (MAPLE) \n described in\n `Germann and Zawadzki (2002)`_.\n \n \n .. _`Laroche and Zawadzki (1995)`: http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n \n .. _`Germann and Zawadzki (2002)`: http://dx.doi.org/10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2\n \n \n The cost function is the sum of the residuals of the squared image \n differences along with a smoothness constraint. \n \n This cost function implementation supports displacement vector \n sectorization.\n Each displacement vector represents the displacement applied to the pixels\n in the corresponding sector.\n \n This helps to reduce the number of degrees of freedom of the cost function \n when hierarchical approaches are used to obtain the minima of \n the cost function (from low resolution to full image resolution).\n For example, in the MAPLE algorithm a Scaling Guess procedure is used to \n find the displacement vectors.\n The echo motion field is retrieved in three runs with increasing resolution.\n The retrieval starts with (left) a uniform field, which is used as a first \n guess to retrieve (middle) the field on a 5 \303\227 5 grid, which in turn is the \n first guess of (right) the final minimization with a 25 \303\227 25 grid.\n \n The shape of the sectors is deduced from the image shape and the displacement\n vector shape. \n \n IMPORTANT: The number of sectors in each dimension (x and y) must be a \n factor of the full image size.\n \n Displaced pixels that fall outside the image limits take the value of the \n nearest edge.\n \n The cost function is computed in parallel over the x axis.""\n \n .. _ndarray: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html\n \n Parameters\n ----------\n \n sector_displacement : ndarray_ (ndim=3) \n Array of displacements to apply to each sector. The dimensions are:\n sector_displacement [ x (0) or y (1) displacement, \n i index of sector, j index of sector ] \n \n \n template_image : ndarray_ (ndim=2)\n Input image array where the sector displacement is applied.\n \n input_image : ndarray_ (ndim=2)\n Image array to be used as reference. \n \n smooth_gain : float\n Smoothness constraint gain.\n\n mask : ndarray_ (ndim=2)\n Data mask. If True, the data is marked as not valid and is not\n used in the computations.\n\n gradient : bool, optional\n If True, the gradient of the morphing function is returned.\n\n Returns\n -------\n \n penalty or gradient values.\n\n penalty : float\n Value of the cost function.\n\n gradient_values : ndarray (float64, ndim=3), optional\n If the gradient keyword is True, the gradient of the function is also\n returned.\n \n \n References\n ----------\n \n Laroche, S., and I. Zawadzki, 1995: \n Retrievals of horizontal winds from single-Doppler clear-air data by methods\n of cross-correlation and variational analysis. \n J. Atmos. Oceanic Technol., 12, 721\342\200\223738.\n doi: http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n \n Germann, U. and I. Zawadzki, 2002: \n Scale-Dependence of the Predictability of Precipitation from Continental \n Radar Images.\n Part I: Description of the Methodology. Mon. Wea. Rev., 130, 2859\342\200\2232873,\n doi: 10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2. \n \n ";
static PyMethodDef __pyx_mdef_7pysteps_6motion_4_vet_3_cost_function = {"_cost_function", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7pysteps_6motion_4_vet_3_cost_function, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7pysteps_6motion_4_vet_2_cost_function};
static PyObject *__pyx_pw_7pysteps_6motion_4_vet_3_cost_function(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_sector_displacement = 0;
PyArrayObject *__pyx_v_template_image = 0;
PyArrayObject *__pyx_v_input_image = 0;
PyArrayObject *__pyx_v_mask = 0;
float __pyx_v_smooth_gain;
int __pyx_v_gradient;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("_cost_function (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sector_displacement,&__pyx_n_s_template_image,&__pyx_n_s_input_image,&__pyx_n_s_mask,&__pyx_n_s_smooth_gain,&__pyx_n_s_gradient,0};
PyObject* values[6] = {0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sector_displacement)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_template_image)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 1); __PYX_ERR(0, 240, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_input_image)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 2); __PYX_ERR(0, 240, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 3); __PYX_ERR(0, 240, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_smooth_gain)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, 4); __PYX_ERR(0, 240, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_gradient);
if (value) { values[5] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_cost_function") < 0)) __PYX_ERR(0, 240, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_sector_displacement = ((PyArrayObject *)values[0]);
__pyx_v_template_image = ((PyArrayObject *)values[1]);
__pyx_v_input_image = ((PyArrayObject *)values[2]);
__pyx_v_mask = ((PyArrayObject *)values[3]);
__pyx_v_smooth_gain = __pyx_PyFloat_AsFloat(values[4]); if (unlikely((__pyx_v_smooth_gain == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 244, __pyx_L3_error)
if (values[5]) {
__pyx_v_gradient = __Pyx_PyObject_IsTrue(values[5]); if (unlikely((__pyx_v_gradient == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 245, __pyx_L3_error)
} else {
/* "pysteps/motion/_vet.pyx":245
* np.ndarray[int8, ndim=2] mask,
* float smooth_gain,
* bint gradient = False): # <<<<<<<<<<<<<<
* """
* Variational Echo Tracking Cost function.
*/
__pyx_v_gradient = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("_cost_function", 0, 5, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 240, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("pysteps.motion._vet._cost_function", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_sector_displacement), __pyx_ptype_5numpy_ndarray, 1, "sector_displacement", 0))) __PYX_ERR(0, 240, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_template_image), __pyx_ptype_5numpy_ndarray, 1, "template_image", 0))) __PYX_ERR(0, 241, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_input_image), __pyx_ptype_5numpy_ndarray, 1, "input_image", 0))) __PYX_ERR(0, 242, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 243, __pyx_L1_error)
__pyx_r = __pyx_pf_7pysteps_6motion_4_vet_2_cost_function(__pyx_self, __pyx_v_sector_displacement, __pyx_v_template_image, __pyx_v_input_image, __pyx_v_mask, __pyx_v_smooth_gain, __pyx_v_gradient);
/* "pysteps/motion/_vet.pyx":240
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<<
* np.ndarray[float64, ndim=2] template_image,
* np.ndarray[float64, ndim=2] input_image,
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
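/* Note: the wrapper above is Cython's specialized argument unpacking (a
 * positional switch with fallthrough, then keyword lookup). A hand-written
 * CPython extension would usually express the same 5-required/1-optional
 * signature with PyArg_ParseTupleAndKeywords; a minimal sketch, guarded
 * behind the hypothetical PYSTEPS_EDITORIAL_SKETCH macro (not the generated
 * code): */
#ifdef PYSTEPS_EDITORIAL_SKETCH
static PyObject *
cost_function_sketch(PyObject *self, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"sector_displacement", "template_image",
                             "input_image", "mask", "smooth_gain",
                             "gradient", NULL};
    PyObject *sector_displacement, *template_image, *input_image, *mask;
    float smooth_gain;
    int gradient = 0;  /* default False, as in the .pyx signature */
    (void)self;
    /* O = object, f = C float, | = remaining arguments optional,
       p = truth-value predicate (PyObject_IsTrue), matching bint */
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOOOf|p", kwlist,
                                     &sector_displacement, &template_image,
                                     &input_image, &mask,
                                     &smooth_gain, &gradient))
        return NULL;
    Py_RETURN_NONE;  /* the real wrapper dispatches to the impl function */
}
#endif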
static PyObject *__pyx_pf_7pysteps_6motion_4_vet_2_cost_function(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_sector_displacement, PyArrayObject *__pyx_v_template_image, PyArrayObject *__pyx_v_input_image, PyArrayObject *__pyx_v_mask, float __pyx_v_smooth_gain, int __pyx_v_gradient) {
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_sectors;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_sectors;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_image_size;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_image_size;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_x_sector_size;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_y_sector_size;
PyArrayObject *__pyx_v_displacement = 0;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_i;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_j;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_xy;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l0;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m0;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_l1;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_m1;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_i_shift;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_j_shift;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_v_axis;
PyArrayObject *__pyx_v_x = 0;
PyArrayObject *__pyx_v_y = 0;
PyArrayObject *__pyx_v_x_guess = 0;
PyArrayObject *__pyx_v_y_guess = 0;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_sector_area;
PyArrayObject *__pyx_v_interp_coef = 0;
PyArrayObject *__pyx_v_l_i = 0;
PyArrayObject *__pyx_v_m_j = 0;
PyArrayObject *__pyx_v_i_min = 0;
PyArrayObject *__pyx_v_i_max = 0;
PyArrayObject *__pyx_v_j_min = 0;
PyArrayObject *__pyx_v_j_max = 0;
PyObject *__pyx_v_counts = NULL;
PyArrayObject *__pyx_v_morphed_image = 0;
PyArrayObject *__pyx_v_morph_mask = 0;
PyArrayObject *__pyx_v__gradient_data = 0;
PyArrayObject *__pyx_v_grad_residuals = 0;
PyArrayObject *__pyx_v_grad_smooth = 0;
PyArrayObject *__pyx_v_buffer = 0;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_residuals;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_smoothness_penalty;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dx2;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dxdy;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_df_dy2;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_v_inloop_smoothness_penalty;
__Pyx_LocalBuf_ND __pyx_pybuffernd__gradient_data;
__Pyx_Buffer __pyx_pybuffer__gradient_data;
__Pyx_LocalBuf_ND __pyx_pybuffernd_buffer;
__Pyx_Buffer __pyx_pybuffer_buffer;
__Pyx_LocalBuf_ND __pyx_pybuffernd_displacement;
__Pyx_Buffer __pyx_pybuffer_displacement;
__Pyx_LocalBuf_ND __pyx_pybuffernd_grad_residuals;
__Pyx_Buffer __pyx_pybuffer_grad_residuals;
__Pyx_LocalBuf_ND __pyx_pybuffernd_grad_smooth;
__Pyx_Buffer __pyx_pybuffer_grad_smooth;
__Pyx_LocalBuf_ND __pyx_pybuffernd_i_max;
__Pyx_Buffer __pyx_pybuffer_i_max;
__Pyx_LocalBuf_ND __pyx_pybuffernd_i_min;
__Pyx_Buffer __pyx_pybuffer_i_min;
__Pyx_LocalBuf_ND __pyx_pybuffernd_input_image;
__Pyx_Buffer __pyx_pybuffer_input_image;
__Pyx_LocalBuf_ND __pyx_pybuffernd_interp_coef;
__Pyx_Buffer __pyx_pybuffer_interp_coef;
__Pyx_LocalBuf_ND __pyx_pybuffernd_j_max;
__Pyx_Buffer __pyx_pybuffer_j_max;
__Pyx_LocalBuf_ND __pyx_pybuffernd_j_min;
__Pyx_Buffer __pyx_pybuffer_j_min;
__Pyx_LocalBuf_ND __pyx_pybuffernd_l_i;
__Pyx_Buffer __pyx_pybuffer_l_i;
__Pyx_LocalBuf_ND __pyx_pybuffernd_m_j;
__Pyx_Buffer __pyx_pybuffer_m_j;
__Pyx_LocalBuf_ND __pyx_pybuffernd_mask;
__Pyx_Buffer __pyx_pybuffer_mask;
__Pyx_LocalBuf_ND __pyx_pybuffernd_morph_mask;
__Pyx_Buffer __pyx_pybuffer_morph_mask;
__Pyx_LocalBuf_ND __pyx_pybuffernd_morphed_image;
__Pyx_Buffer __pyx_pybuffer_morphed_image;
__Pyx_LocalBuf_ND __pyx_pybuffernd_sector_displacement;
__Pyx_Buffer __pyx_pybuffer_sector_displacement;
__Pyx_LocalBuf_ND __pyx_pybuffernd_template_image;
__Pyx_Buffer __pyx_pybuffer_template_image;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x;
__Pyx_Buffer __pyx_pybuffer_x;
__Pyx_LocalBuf_ND __pyx_pybuffernd_x_guess;
__Pyx_Buffer __pyx_pybuffer_x_guess;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y;
__Pyx_Buffer __pyx_pybuffer_y;
__Pyx_LocalBuf_ND __pyx_pybuffernd_y_guess;
__Pyx_Buffer __pyx_pybuffer_y_guess;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyArrayObject *__pyx_t_7 = NULL;
PyArrayObject *__pyx_t_8 = NULL;
int __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
PyArrayObject *__pyx_t_13 = NULL;
PyArrayObject *__pyx_t_14 = NULL;
PyArrayObject *__pyx_t_15 = NULL;
PyArrayObject *__pyx_t_16 = NULL;
PyArrayObject *__pyx_t_17 = NULL;
PyArrayObject *__pyx_t_18 = NULL;
PyArrayObject *__pyx_t_19 = NULL;
PyArrayObject *__pyx_t_20 = NULL;
PyArrayObject *__pyx_t_21 = NULL;
PyArrayObject *__pyx_t_22 = NULL;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_23;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_24;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_25;
Py_ssize_t __pyx_t_26;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_27;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_28;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_29;
Py_ssize_t __pyx_t_30;
Py_ssize_t __pyx_t_31;
Py_ssize_t __pyx_t_32;
Py_ssize_t __pyx_t_33;
Py_ssize_t __pyx_t_34;
Py_ssize_t __pyx_t_35;
Py_ssize_t __pyx_t_36;
Py_ssize_t __pyx_t_37;
Py_ssize_t __pyx_t_38;
Py_ssize_t __pyx_t_39;
Py_ssize_t __pyx_t_40;
Py_ssize_t __pyx_t_41;
Py_ssize_t __pyx_t_42;
Py_ssize_t __pyx_t_43;
Py_ssize_t __pyx_t_44;
Py_ssize_t __pyx_t_45;
Py_ssize_t __pyx_t_46;
Py_ssize_t __pyx_t_47;
Py_ssize_t __pyx_t_48;
Py_ssize_t __pyx_t_49;
Py_ssize_t __pyx_t_50;
Py_ssize_t __pyx_t_51;
Py_ssize_t __pyx_t_52;
Py_ssize_t __pyx_t_53;
Py_ssize_t __pyx_t_54;
Py_ssize_t __pyx_t_55;
Py_ssize_t __pyx_t_56;
Py_ssize_t __pyx_t_57;
Py_ssize_t __pyx_t_58;
Py_ssize_t __pyx_t_59;
Py_ssize_t __pyx_t_60;
Py_ssize_t __pyx_t_61;
Py_ssize_t __pyx_t_62;
Py_ssize_t __pyx_t_63;
Py_ssize_t __pyx_t_64;
Py_ssize_t __pyx_t_65;
Py_ssize_t __pyx_t_66;
Py_ssize_t __pyx_t_67;
Py_ssize_t __pyx_t_68;
Py_ssize_t __pyx_t_69;
Py_ssize_t __pyx_t_70;
Py_ssize_t __pyx_t_71;
Py_ssize_t __pyx_t_72;
Py_ssize_t __pyx_t_73;
Py_ssize_t __pyx_t_74;
Py_ssize_t __pyx_t_75;
Py_ssize_t __pyx_t_76;
Py_ssize_t __pyx_t_77;
Py_ssize_t __pyx_t_78;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_79;
Py_ssize_t __pyx_t_80;
Py_ssize_t __pyx_t_81;
Py_ssize_t __pyx_t_82;
Py_ssize_t __pyx_t_83;
Py_ssize_t __pyx_t_84;
Py_ssize_t __pyx_t_85;
Py_ssize_t __pyx_t_86;
Py_ssize_t __pyx_t_87;
Py_ssize_t __pyx_t_88;
Py_ssize_t __pyx_t_89;
Py_ssize_t __pyx_t_90;
Py_ssize_t __pyx_t_91;
Py_ssize_t __pyx_t_92;
Py_ssize_t __pyx_t_93;
Py_ssize_t __pyx_t_94;
Py_ssize_t __pyx_t_95;
Py_ssize_t __pyx_t_96;
Py_ssize_t __pyx_t_97;
Py_ssize_t __pyx_t_98;
Py_ssize_t __pyx_t_99;
Py_ssize_t __pyx_t_100;
Py_ssize_t __pyx_t_101;
Py_ssize_t __pyx_t_102;
Py_ssize_t __pyx_t_103;
Py_ssize_t __pyx_t_104;
Py_ssize_t __pyx_t_105;
Py_ssize_t __pyx_t_106;
Py_ssize_t __pyx_t_107;
PyObject *(*__pyx_t_108)(PyObject *);
PyObject *__pyx_t_109 = NULL;
PyObject *(*__pyx_t_110)(PyObject *);
Py_ssize_t __pyx_t_111;
Py_ssize_t __pyx_t_112;
Py_ssize_t __pyx_t_113;
Py_ssize_t __pyx_t_114;
PyArrayObject *__pyx_t_115 = NULL;
PyArrayObject *__pyx_t_116 = NULL;
PyArrayObject *__pyx_t_117 = NULL;
PyArrayObject *__pyx_t_118 = NULL;
PyArrayObject *__pyx_t_119 = NULL;
PyArrayObject *__pyx_t_120 = NULL;
PyObject *__pyx_t_121 = NULL;
Py_ssize_t __pyx_t_122;
Py_ssize_t __pyx_t_123;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_124;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_125;
Py_ssize_t __pyx_t_126;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_127;
Py_ssize_t __pyx_t_128;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_129;
__pyx_t_7pysteps_6motion_4_vet_intp __pyx_t_130;
Py_ssize_t __pyx_t_131;
Py_ssize_t __pyx_t_132;
Py_ssize_t __pyx_t_133;
Py_ssize_t __pyx_t_134;
Py_ssize_t __pyx_t_135;
Py_ssize_t __pyx_t_136;
Py_ssize_t __pyx_t_137;
Py_ssize_t __pyx_t_138;
Py_ssize_t __pyx_t_139;
Py_ssize_t __pyx_t_140;
Py_ssize_t __pyx_t_141;
Py_ssize_t __pyx_t_142;
Py_ssize_t __pyx_t_143;
Py_ssize_t __pyx_t_144;
Py_ssize_t __pyx_t_145;
Py_ssize_t __pyx_t_146;
Py_ssize_t __pyx_t_147;
Py_ssize_t __pyx_t_148;
Py_ssize_t __pyx_t_149;
Py_ssize_t __pyx_t_150;
Py_ssize_t __pyx_t_151;
Py_ssize_t __pyx_t_152;
Py_ssize_t __pyx_t_153;
Py_ssize_t __pyx_t_154;
Py_ssize_t __pyx_t_155;
Py_ssize_t __pyx_t_156;
Py_ssize_t __pyx_t_157;
Py_ssize_t __pyx_t_158;
Py_ssize_t __pyx_t_159;
Py_ssize_t __pyx_t_160;
Py_ssize_t __pyx_t_161;
Py_ssize_t __pyx_t_162;
Py_ssize_t __pyx_t_163;
Py_ssize_t __pyx_t_164;
Py_ssize_t __pyx_t_165;
Py_ssize_t __pyx_t_166;
Py_ssize_t __pyx_t_167;
Py_ssize_t __pyx_t_168;
Py_ssize_t __pyx_t_169;
Py_ssize_t __pyx_t_170;
Py_ssize_t __pyx_t_171;
Py_ssize_t __pyx_t_172;
Py_ssize_t __pyx_t_173;
Py_ssize_t __pyx_t_174;
Py_ssize_t __pyx_t_175;
Py_ssize_t __pyx_t_176;
Py_ssize_t __pyx_t_177;
Py_ssize_t __pyx_t_178;
Py_ssize_t __pyx_t_179;
Py_ssize_t __pyx_t_180;
Py_ssize_t __pyx_t_181;
Py_ssize_t __pyx_t_182;
Py_ssize_t __pyx_t_183;
Py_ssize_t __pyx_t_184;
Py_ssize_t __pyx_t_185;
Py_ssize_t __pyx_t_186;
Py_ssize_t __pyx_t_187;
Py_ssize_t __pyx_t_188;
Py_ssize_t __pyx_t_189;
Py_ssize_t __pyx_t_190;
Py_ssize_t __pyx_t_191;
Py_ssize_t __pyx_t_192;
Py_ssize_t __pyx_t_193;
Py_ssize_t __pyx_t_194;
Py_ssize_t __pyx_t_195;
Py_ssize_t __pyx_t_196;
Py_ssize_t __pyx_t_197;
Py_ssize_t __pyx_t_198;
Py_ssize_t __pyx_t_199;
Py_ssize_t __pyx_t_200;
Py_ssize_t __pyx_t_201;
Py_ssize_t __pyx_t_202;
Py_ssize_t __pyx_t_203;
Py_ssize_t __pyx_t_204;
Py_ssize_t __pyx_t_205;
Py_ssize_t __pyx_t_206;
Py_ssize_t __pyx_t_207;
Py_ssize_t __pyx_t_208;
Py_ssize_t __pyx_t_209;
Py_ssize_t __pyx_t_210;
Py_ssize_t __pyx_t_211;
Py_ssize_t __pyx_t_212;
Py_ssize_t __pyx_t_213;
Py_ssize_t __pyx_t_214;
Py_ssize_t __pyx_t_215;
Py_ssize_t __pyx_t_216;
Py_ssize_t __pyx_t_217;
Py_ssize_t __pyx_t_218;
Py_ssize_t __pyx_t_219;
Py_ssize_t __pyx_t_220;
Py_ssize_t __pyx_t_221;
Py_ssize_t __pyx_t_222;
Py_ssize_t __pyx_t_223;
Py_ssize_t __pyx_t_224;
Py_ssize_t __pyx_t_225;
Py_ssize_t __pyx_t_226;
__pyx_t_7pysteps_6motion_4_vet_float64 __pyx_t_227;
long __pyx_t_228;
long __pyx_t_229;
long __pyx_t_230;
long __pyx_t_231;
Py_ssize_t __pyx_t_232;
Py_ssize_t __pyx_t_233;
Py_ssize_t __pyx_t_234;
Py_ssize_t __pyx_t_235;
Py_ssize_t __pyx_t_236;
Py_ssize_t __pyx_t_237;
Py_ssize_t __pyx_t_238;
Py_ssize_t __pyx_t_239;
Py_ssize_t __pyx_t_240;
Py_ssize_t __pyx_t_241;
Py_ssize_t __pyx_t_242;
Py_ssize_t __pyx_t_243;
Py_ssize_t __pyx_t_244;
Py_ssize_t __pyx_t_245;
Py_ssize_t __pyx_t_246;
Py_ssize_t __pyx_t_247;
Py_ssize_t __pyx_t_248;
Py_ssize_t __pyx_t_249;
Py_ssize_t __pyx_t_250;
Py_ssize_t __pyx_t_251;
Py_ssize_t __pyx_t_252;
Py_ssize_t __pyx_t_253;
Py_ssize_t __pyx_t_254;
Py_ssize_t __pyx_t_255;
Py_ssize_t __pyx_t_256;
Py_ssize_t __pyx_t_257;
Py_ssize_t __pyx_t_258;
Py_ssize_t __pyx_t_259;
Py_ssize_t __pyx_t_260;
Py_ssize_t __pyx_t_261;
Py_ssize_t __pyx_t_262;
Py_ssize_t __pyx_t_263;
Py_ssize_t __pyx_t_264;
Py_ssize_t __pyx_t_265;
Py_ssize_t __pyx_t_266;
Py_ssize_t __pyx_t_267;
Py_ssize_t __pyx_t_268;
Py_ssize_t __pyx_t_269;
Py_ssize_t __pyx_t_270;
Py_ssize_t __pyx_t_271;
Py_ssize_t __pyx_t_272;
Py_ssize_t __pyx_t_273;
Py_ssize_t __pyx_t_274;
Py_ssize_t __pyx_t_275;
Py_ssize_t __pyx_t_276;
Py_ssize_t __pyx_t_277;
Py_ssize_t __pyx_t_278;
Py_ssize_t __pyx_t_279;
Py_ssize_t __pyx_t_280;
Py_ssize_t __pyx_t_281;
Py_ssize_t __pyx_t_282;
Py_ssize_t __pyx_t_283;
Py_ssize_t __pyx_t_284;
Py_ssize_t __pyx_t_285;
Py_ssize_t __pyx_t_286;
Py_ssize_t __pyx_t_287;
Py_ssize_t __pyx_t_288;
Py_ssize_t __pyx_t_289;
Py_ssize_t __pyx_t_290;
Py_ssize_t __pyx_t_291;
__Pyx_RefNannySetupContext("_cost_function", 0);
__pyx_pybuffer_displacement.pybuffer.buf = NULL;
__pyx_pybuffer_displacement.refcount = 0;
__pyx_pybuffernd_displacement.data = NULL;
__pyx_pybuffernd_displacement.rcbuffer = &__pyx_pybuffer_displacement;
__pyx_pybuffer_x.pybuffer.buf = NULL;
__pyx_pybuffer_x.refcount = 0;
__pyx_pybuffernd_x.data = NULL;
__pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x;
__pyx_pybuffer_y.pybuffer.buf = NULL;
__pyx_pybuffer_y.refcount = 0;
__pyx_pybuffernd_y.data = NULL;
__pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y;
__pyx_pybuffer_x_guess.pybuffer.buf = NULL;
__pyx_pybuffer_x_guess.refcount = 0;
__pyx_pybuffernd_x_guess.data = NULL;
__pyx_pybuffernd_x_guess.rcbuffer = &__pyx_pybuffer_x_guess;
__pyx_pybuffer_y_guess.pybuffer.buf = NULL;
__pyx_pybuffer_y_guess.refcount = 0;
__pyx_pybuffernd_y_guess.data = NULL;
__pyx_pybuffernd_y_guess.rcbuffer = &__pyx_pybuffer_y_guess;
__pyx_pybuffer_interp_coef.pybuffer.buf = NULL;
__pyx_pybuffer_interp_coef.refcount = 0;
__pyx_pybuffernd_interp_coef.data = NULL;
__pyx_pybuffernd_interp_coef.rcbuffer = &__pyx_pybuffer_interp_coef;
__pyx_pybuffer_l_i.pybuffer.buf = NULL;
__pyx_pybuffer_l_i.refcount = 0;
__pyx_pybuffernd_l_i.data = NULL;
__pyx_pybuffernd_l_i.rcbuffer = &__pyx_pybuffer_l_i;
__pyx_pybuffer_m_j.pybuffer.buf = NULL;
__pyx_pybuffer_m_j.refcount = 0;
__pyx_pybuffernd_m_j.data = NULL;
__pyx_pybuffernd_m_j.rcbuffer = &__pyx_pybuffer_m_j;
__pyx_pybuffer_i_min.pybuffer.buf = NULL;
__pyx_pybuffer_i_min.refcount = 0;
__pyx_pybuffernd_i_min.data = NULL;
__pyx_pybuffernd_i_min.rcbuffer = &__pyx_pybuffer_i_min;
__pyx_pybuffer_i_max.pybuffer.buf = NULL;
__pyx_pybuffer_i_max.refcount = 0;
__pyx_pybuffernd_i_max.data = NULL;
__pyx_pybuffernd_i_max.rcbuffer = &__pyx_pybuffer_i_max;
__pyx_pybuffer_j_min.pybuffer.buf = NULL;
__pyx_pybuffer_j_min.refcount = 0;
__pyx_pybuffernd_j_min.data = NULL;
__pyx_pybuffernd_j_min.rcbuffer = &__pyx_pybuffer_j_min;
__pyx_pybuffer_j_max.pybuffer.buf = NULL;
__pyx_pybuffer_j_max.refcount = 0;
__pyx_pybuffernd_j_max.data = NULL;
__pyx_pybuffernd_j_max.rcbuffer = &__pyx_pybuffer_j_max;
__pyx_pybuffer_morphed_image.pybuffer.buf = NULL;
__pyx_pybuffer_morphed_image.refcount = 0;
__pyx_pybuffernd_morphed_image.data = NULL;
__pyx_pybuffernd_morphed_image.rcbuffer = &__pyx_pybuffer_morphed_image;
__pyx_pybuffer_morph_mask.pybuffer.buf = NULL;
__pyx_pybuffer_morph_mask.refcount = 0;
__pyx_pybuffernd_morph_mask.data = NULL;
__pyx_pybuffernd_morph_mask.rcbuffer = &__pyx_pybuffer_morph_mask;
__pyx_pybuffer__gradient_data.pybuffer.buf = NULL;
__pyx_pybuffer__gradient_data.refcount = 0;
__pyx_pybuffernd__gradient_data.data = NULL;
__pyx_pybuffernd__gradient_data.rcbuffer = &__pyx_pybuffer__gradient_data;
__pyx_pybuffer_grad_residuals.pybuffer.buf = NULL;
__pyx_pybuffer_grad_residuals.refcount = 0;
__pyx_pybuffernd_grad_residuals.data = NULL;
__pyx_pybuffernd_grad_residuals.rcbuffer = &__pyx_pybuffer_grad_residuals;
__pyx_pybuffer_grad_smooth.pybuffer.buf = NULL;
__pyx_pybuffer_grad_smooth.refcount = 0;
__pyx_pybuffernd_grad_smooth.data = NULL;
__pyx_pybuffernd_grad_smooth.rcbuffer = &__pyx_pybuffer_grad_smooth;
__pyx_pybuffer_buffer.pybuffer.buf = NULL;
__pyx_pybuffer_buffer.refcount = 0;
__pyx_pybuffernd_buffer.data = NULL;
__pyx_pybuffernd_buffer.rcbuffer = &__pyx_pybuffer_buffer;
__pyx_pybuffer_sector_displacement.pybuffer.buf = NULL;
__pyx_pybuffer_sector_displacement.refcount = 0;
__pyx_pybuffernd_sector_displacement.data = NULL;
__pyx_pybuffernd_sector_displacement.rcbuffer = &__pyx_pybuffer_sector_displacement;
__pyx_pybuffer_template_image.pybuffer.buf = NULL;
__pyx_pybuffer_template_image.refcount = 0;
__pyx_pybuffernd_template_image.data = NULL;
__pyx_pybuffernd_template_image.rcbuffer = &__pyx_pybuffer_template_image;
__pyx_pybuffer_input_image.pybuffer.buf = NULL;
__pyx_pybuffer_input_image.refcount = 0;
__pyx_pybuffernd_input_image.data = NULL;
__pyx_pybuffernd_input_image.rcbuffer = &__pyx_pybuffer_input_image;
__pyx_pybuffer_mask.pybuffer.buf = NULL;
__pyx_pybuffer_mask.refcount = 0;
__pyx_pybuffernd_mask.data = NULL;
__pyx_pybuffernd_mask.rcbuffer = &__pyx_pybuffer_mask;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_v_sector_displacement, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error)
}
__pyx_pybuffernd_sector_displacement.diminfo[0].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sector_displacement.diminfo[0].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sector_displacement.diminfo[1].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sector_displacement.diminfo[1].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_sector_displacement.diminfo[2].strides = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_sector_displacement.diminfo[2].shape = __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.shape[2];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_template_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error)
}
__pyx_pybuffernd_template_image.diminfo[0].strides = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_template_image.diminfo[0].shape = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_template_image.diminfo[1].strides = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_template_image.diminfo[1].shape = __pyx_pybuffernd_template_image.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_input_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error)
}
__pyx_pybuffernd_input_image.diminfo[0].strides = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_input_image.diminfo[0].shape = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_input_image.diminfo[1].strides = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_input_image.diminfo[1].shape = __pyx_pybuffernd_input_image.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 240, __pyx_L1_error)
}
__pyx_pybuffernd_mask.diminfo[0].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_mask.diminfo[0].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_mask.diminfo[1].strides = __pyx_pybuffernd_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_mask.diminfo[1].shape = __pyx_pybuffernd_mask.rcbuffer->pybuffer.shape[1];
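 /* Note: the four __Pyx_GetBufferAndValidate calls above wrap the standard
  * CPython buffer protocol: each acquires a Py_buffer view with
  * PyBUF_FORMAT | PyBUF_STRIDES, checks the format string and ndim against
  * the declared np.ndarray[...] type, and caches shape/strides in diminfo
  * for fast indexing. The underlying protocol, sketched by hand:
  *
  *     Py_buffer view;
  *     if (PyObject_GetBuffer(obj, &view, PyBUF_FORMAT | PyBUF_STRIDES) < 0)
  *         return NULL;  // obj does not export a compatible buffer
  *     // ... use view.buf, view.shape, view.strides, view.format ...
  *     PyBuffer_Release(&view);
  */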
/* "pysteps/motion/_vet.pyx":350
* """
*
* cdef intp x_sectors = <intp> sector_displacement.shape[1] # <<<<<<<<<<<<<<
* cdef intp y_sectors = <intp> sector_displacement.shape[2]
*
*/
__pyx_v_x_sectors = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_sector_displacement->dimensions[1]));
/* "pysteps/motion/_vet.pyx":351
*
* cdef intp x_sectors = <intp> sector_displacement.shape[1]
* cdef intp y_sectors = <intp> sector_displacement.shape[2] # <<<<<<<<<<<<<<
*
* cdef intp x_image_size = <intp> template_image.shape[0]
*/
__pyx_v_y_sectors = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_sector_displacement->dimensions[2]));
/* "pysteps/motion/_vet.pyx":353
* cdef intp y_sectors = <intp> sector_displacement.shape[2]
*
* cdef intp x_image_size = <intp> template_image.shape[0] # <<<<<<<<<<<<<<
* cdef intp y_image_size = <intp> template_image.shape[1]
*
*/
__pyx_v_x_image_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_template_image->dimensions[0]));
/* "pysteps/motion/_vet.pyx":354
*
* cdef intp x_image_size = <intp> template_image.shape[0]
* cdef intp y_image_size = <intp> template_image.shape[1] # <<<<<<<<<<<<<<
*
* if x_image_size % x_sectors != 0:
*/
__pyx_v_y_image_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)(__pyx_v_template_image->dimensions[1]));
/* "pysteps/motion/_vet.pyx":356
* cdef intp y_image_size = <intp> template_image.shape[1]
*
* if x_image_size % x_sectors != 0: # <<<<<<<<<<<<<<
* raise ValueError("Error computing cost function.\n",
* "The number of sectors in x axis (axis=0)"
*/
__pyx_t_1 = (((__pyx_v_x_image_size % __pyx_v_x_sectors) != 0) != 0);
if (unlikely(__pyx_t_1)) {
/* "pysteps/motion/_vet.pyx":357
*
* if x_image_size % x_sectors != 0:
* raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<<
* "The number of sectors in x axis (axis=0)"
* + " don't divide the image size")
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 357, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(0, 357, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":356
* cdef intp y_image_size = <intp> template_image.shape[1]
*
* if x_image_size % x_sectors != 0: # <<<<<<<<<<<<<<
* raise ValueError("Error computing cost function.\n",
* "The number of sectors in x axis (axis=0)"
*/
}
/* "pysteps/motion/_vet.pyx":361
* + " don't divide the image size")
*
* if y_image_size % y_sectors != 0: # <<<<<<<<<<<<<<
* raise ValueError("Error computing cost function.\n",
* "The number of sectors in y axis (axis=1) don't"
*/
__pyx_t_1 = (((__pyx_v_y_image_size % __pyx_v_y_sectors) != 0) != 0);
if (unlikely(__pyx_t_1)) {
/* "pysteps/motion/_vet.pyx":362
*
* if y_image_size % y_sectors != 0:
* raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<<
* "The number of sectors in y axis (axis=1) don't"
* + " divide the image size")
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 362, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(0, 362, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":361
* + " don't divide the image size")
*
* if y_image_size % y_sectors != 0: # <<<<<<<<<<<<<<
* raise ValueError("Error computing cost function.\n",
* "The number of sectors in y axis (axis=1) don't"
*/
}
/* "pysteps/motion/_vet.pyx":367
*
* cdef intp x_sector_size = (
* <intp> (round(x_image_size / x_sectors))) # <<<<<<<<<<<<<<
*
* cdef intp y_sector_size = (
*/
__pyx_v_x_sector_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)round((__pyx_v_x_image_size / __pyx_v_x_sectors)));
/* "pysteps/motion/_vet.pyx":370
*
* cdef intp y_sector_size = (
* <intp> (round(y_image_size / y_sectors))) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[float64, ndim = 3] displacement = (
*/
__pyx_v_y_sector_size = ((__pyx_t_7pysteps_6motion_4_vet_intp)round((__pyx_v_y_image_size / __pyx_v_y_sectors)));
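 /* Note: the function is compiled with @cython.cdivision(True) and both
  * operands are intp, so x_image_size / x_sectors above is plain C integer
  * division and the round() is effectively a no-op; the modulo checks
  * earlier in the function already guarantee the division is exact. For
  * example, a 100 x 100 image split into 5 x 5 sectors gives
  * x_sector_size = y_sector_size = 20. */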
/* "pysteps/motion/_vet.pyx":373
*
* cdef np.ndarray[float64, ndim = 3] displacement = (
* np.zeros([2, x_image_size, y_image_size], dtype=np.float64)) # <<<<<<<<<<<<<<
*
* cdef intp i, j, xy, l, m, ll, mm, i_sec, j_sec
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyList_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_2);
__Pyx_GIVEREF(__pyx_t_2);
PyList_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyList_SET_ITEM(__pyx_t_5, 2, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float64); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 373, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 373, __pyx_L1_error)
__pyx_t_7 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) {
__pyx_v_displacement = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 372, __pyx_L1_error)
} else {__pyx_pybuffernd_displacement.diminfo[0].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_displacement.diminfo[0].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_displacement.diminfo[1].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_displacement.diminfo[1].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_displacement.diminfo[2].strides = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_displacement.diminfo[2].shape = __pyx_pybuffernd_displacement.rcbuffer->pybuffer.shape[2];
}
}
__pyx_t_7 = 0;
__pyx_v_displacement = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "pysteps/motion/_vet.pyx":378
* cdef intp l0, m0, l1, m1, i_shift, j_shift, axis
*
* i_shift = (x_sector_size // 2) # <<<<<<<<<<<<<<
* j_shift = (y_sector_size // 2)
*
*/
__pyx_v_i_shift = (__pyx_v_x_sector_size / 2);
/* "pysteps/motion/_vet.pyx":379
*
* i_shift = (x_sector_size // 2)
* j_shift = (y_sector_size // 2) # <<<<<<<<<<<<<<
*
* #Assume regular grid with constant grid spacing.
*/
__pyx_v_j_shift = (__pyx_v_y_sector_size / 2);
/* "pysteps/motion/_vet.pyx":385
* cdef np.ndarray[float64, ndim = 1] x
* cdef np.ndarray[float64, ndim = 1] y
* x = np.arange(x_image_size, dtype='float64') # <<<<<<<<<<<<<<
* y = np.arange(y_image_size, dtype='float64')
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_arange); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 385, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 385, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_n_u_float64) < 0) __PYX_ERR(0, 385, __pyx_L1_error)
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 385, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 385, __pyx_L1_error)
__pyx_t_8 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 385, __pyx_L1_error)
}
__pyx_t_8 = 0;
__pyx_v_x = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
/* "pysteps/motion/_vet.pyx":386
* cdef np.ndarray[float64, ndim = 1] y
* x = np.arange(x_image_size, dtype='float64')
* y = np.arange(y_image_size, dtype='float64') # <<<<<<<<<<<<<<
*
* cdef np.ndarray[float64, ndim = 1] x_guess
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_arange); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 386, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 386, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 386, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_n_u_float64) < 0) __PYX_ERR(0, 386, __pyx_L1_error)
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 386, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 386, __pyx_L1_error)
__pyx_t_13 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10);
}
__pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 386, __pyx_L1_error)
}
__pyx_t_13 = 0;
__pyx_v_y = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "pysteps/motion/_vet.pyx":391
* cdef np.ndarray[float64, ndim = 1] y_guess
*
* x_guess = x.reshape((x_sectors, x_sector_size)).mean(axis=1) # <<<<<<<<<<<<<<
* y_guess = y.reshape((y_sectors, y_sector_size)).mean(axis=1)
*
*/
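/* Note (annotation): reshaping the length-x_image_size coordinate axis
   into (x_sectors, x_sector_size) and averaging over axis=1 yields one
   coordinate per sector -- effectively the centre of each sector, and
   analogously for y_guess. This implicitly assumes
   x_image_size == x_sectors * x_sector_size; otherwise the reshape
   would raise a ValueError at runtime. */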
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_x), __pyx_n_s_reshape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 391, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 391, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sector_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 391, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_6);
__pyx_t_4 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_5 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_6, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 391, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_mean); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 391, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 391, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_axis, __pyx_int_1) < 0) __PYX_ERR(0, 391, __pyx_L1_error)
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 391, __pyx_L1_error)
__pyx_t_14 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer, (PyObject*)__pyx_t_14, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer, (PyObject*)__pyx_v_x_guess, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_x_guess.diminfo[0].strides = __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x_guess.diminfo[0].shape = __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 391, __pyx_L1_error)
}
__pyx_t_14 = 0;
__pyx_v_x_guess = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
/* "pysteps/motion/_vet.pyx":392
*
* x_guess = x.reshape((x_sectors, x_sector_size)).mean(axis=1)
* y_guess = y.reshape((y_sectors, y_sector_size)).mean(axis=1) # <<<<<<<<<<<<<<
*
* cdef float64 sector_area
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_y), __pyx_n_s_reshape); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sector_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_6);
__pyx_t_3 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_2 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_mean); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_axis, __pyx_int_1) < 0) __PYX_ERR(0, 392, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 392, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 392, __pyx_L1_error)
__pyx_t_15 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer, (PyObject*)__pyx_t_15, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer, (PyObject*)__pyx_v_y_guess, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10);
}
__pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_y_guess.diminfo[0].strides = __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y_guess.diminfo[0].shape = __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.shape[0];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 392, __pyx_L1_error)
}
__pyx_t_15 = 0;
__pyx_v_y_guess = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "pysteps/motion/_vet.pyx":398
* cdef np.ndarray[float64, ndim = 3] interp_coef
*
* interp_coef = np.zeros([4, x_image_size, y_image_size], dtype=np.float64) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp)
*/
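/* Note (annotation): interp_coef[0..3, i, j] will hold the four
   bilinear interpolation weights of pixel (i, j) with respect to the
   surrounding sector centres; they are filled inside the prange loop
   further below. */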
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyList_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyList_SET_ITEM(__pyx_t_6, 0, __pyx_int_4);
__Pyx_GIVEREF(__pyx_t_4);
PyList_SET_ITEM(__pyx_t_6, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyList_SET_ITEM(__pyx_t_6, 2, __pyx_t_5);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 398, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 398, __pyx_L1_error)
__pyx_t_16 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer, (PyObject*)__pyx_t_16, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer, (PyObject*)__pyx_v_interp_coef, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_interp_coef.diminfo[0].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_interp_coef.diminfo[0].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[0];
__pyx_pybuffernd_interp_coef.diminfo[1].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_interp_coef.diminfo[1].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[1];
__pyx_pybuffernd_interp_coef.diminfo[2].strides = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_interp_coef.diminfo[2].shape = __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.shape[2];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 398, __pyx_L1_error)
}
__pyx_t_16 = 0;
__pyx_v_interp_coef = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
/* "pysteps/motion/_vet.pyx":400
* interp_coef = np.zeros([4, x_image_size, y_image_size], dtype=np.float64)
*
* cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp) # <<<<<<<<<<<<<<
* cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp)
*
*/
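/* Note (annotation): l_i[i] and m_j[j] record, for every pixel row and
   column, the index of the lower bracketing sector (the l0 / m0 values
   computed in the loop below); after the loop they are consumed by
   np.unique to derive the per-sector pixel ranges
   (i_min/i_max, j_min/j_max). */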
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_4) < 0) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 400, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 400, __pyx_L1_error)
__pyx_t_17 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer, (PyObject*)__pyx_t_17, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_l_i = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_l_i.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 400, __pyx_L1_error)
} else {__pyx_pybuffernd_l_i.diminfo[0].strides = __pyx_pybuffernd_l_i.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_l_i.diminfo[0].shape = __pyx_pybuffernd_l_i.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_17 = 0;
__pyx_v_l_i = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "pysteps/motion/_vet.pyx":401
*
* cdef np.ndarray[intp, ndim = 1] l_i = np.zeros(x_image_size, dtype=np.intp)
* cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors,
*/
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 401, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 401, __pyx_L1_error)
__pyx_t_18 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer, (PyObject*)__pyx_t_18, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_m_j = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_m_j.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 401, __pyx_L1_error)
} else {__pyx_pybuffernd_m_j.diminfo[0].strides = __pyx_pybuffernd_m_j.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_m_j.diminfo[0].shape = __pyx_pybuffernd_m_j.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_18 = 0;
__pyx_v_m_j = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
/* "pysteps/motion/_vet.pyx":403
* cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<<
* x_image_size,
* dtype=np.intp)
*/
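/* Note (annotation): i_min/i_max (and j_min/j_max below) are pre-filled
   with the image size via np.full -- presumably an out-of-range
   sentinel for sectors that receive no pixels -- and are overwritten
   per sector from the np.unique results after the main loop. */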
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_full); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "pysteps/motion/_vet.pyx":404
*
* cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors,
* x_image_size, # <<<<<<<<<<<<<<
* dtype=np.intp)
*
*/
__pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 404, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
/* "pysteps/motion/_vet.pyx":403
* cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<<
* x_image_size,
* dtype=np.intp)
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5);
__pyx_t_2 = 0;
__pyx_t_5 = 0;
/* "pysteps/motion/_vet.pyx":405
* cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors,
* x_image_size,
* dtype=np.intp) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors,
*/
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 405, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 405, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 405, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 405, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "pysteps/motion/_vet.pyx":403
* cdef np.ndarray[intp, ndim = 1] m_j = np.zeros(y_image_size, dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] i_min = np.full(x_sectors, # <<<<<<<<<<<<<<
* x_image_size,
* dtype=np.intp)
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 403, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 403, __pyx_L1_error)
__pyx_t_19 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer, (PyObject*)__pyx_t_19, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_i_min = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 403, __pyx_L1_error)
} else {__pyx_pybuffernd_i_min.diminfo[0].strides = __pyx_pybuffernd_i_min.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_i_min.diminfo[0].shape = __pyx_pybuffernd_i_min.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_19 = 0;
__pyx_v_i_min = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "pysteps/motion/_vet.pyx":407
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<<
* x_image_size,
* dtype=np.intp)
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_full); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
/* "pysteps/motion/_vet.pyx":408
*
* cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors,
* x_image_size, # <<<<<<<<<<<<<<
* dtype=np.intp)
*
*/
__pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 408, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "pysteps/motion/_vet.pyx":407
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<<
* x_image_size,
* dtype=np.intp)
*/
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__pyx_t_6 = 0;
__pyx_t_3 = 0;
/* "pysteps/motion/_vet.pyx":409
* cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors,
* x_image_size,
* dtype=np.intp) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors,
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 409, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 409, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "pysteps/motion/_vet.pyx":407
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] i_max = np.full(x_sectors, # <<<<<<<<<<<<<<
* x_image_size,
* dtype=np.intp)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 407, __pyx_L1_error)
__pyx_t_20 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer, (PyObject*)__pyx_t_20, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_i_max = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 407, __pyx_L1_error)
} else {__pyx_pybuffernd_i_max.diminfo[0].strides = __pyx_pybuffernd_i_max.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_i_max.diminfo[0].shape = __pyx_pybuffernd_i_max.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_20 = 0;
__pyx_v_i_max = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
/* "pysteps/motion/_vet.pyx":411
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<<
* y_image_size,
* dtype=np.intp)
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_full); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "pysteps/motion/_vet.pyx":412
*
* cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors,
* y_image_size, # <<<<<<<<<<<<<<
* dtype=np.intp)
*
*/
__pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 412, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
/* "pysteps/motion/_vet.pyx":411
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<<
* y_image_size,
* dtype=np.intp)
*/
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
/* "pysteps/motion/_vet.pyx":413
* cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors,
* y_image_size,
* dtype=np.intp) # <<<<<<<<<<<<<<
*
* cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors,
*/
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 413, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 413, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_intp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 413, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 413, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "pysteps/motion/_vet.pyx":411
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] j_min = np.full(y_sectors, # <<<<<<<<<<<<<<
* y_image_size,
* dtype=np.intp)
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 411, __pyx_L1_error)
__pyx_t_21 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer, (PyObject*)__pyx_t_21, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_j_min = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 411, __pyx_L1_error)
} else {__pyx_pybuffernd_j_min.diminfo[0].strides = __pyx_pybuffernd_j_min.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_j_min.diminfo[0].shape = __pyx_pybuffernd_j_min.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_21 = 0;
__pyx_v_j_min = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "pysteps/motion/_vet.pyx":415
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<<
* y_image_size,
* dtype=np.intp)
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 415, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_full); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 415, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 415, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
/* "pysteps/motion/_vet.pyx":416
*
* cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors,
* y_image_size, # <<<<<<<<<<<<<<
* dtype=np.intp)
*
*/
__pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 416, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
/* "pysteps/motion/_vet.pyx":415
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<<
* y_image_size,
* dtype=np.intp)
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 415, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5);
__pyx_t_6 = 0;
__pyx_t_5 = 0;
/* "pysteps/motion/_vet.pyx":417
* cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors,
* y_image_size,
* dtype=np.intp) # <<<<<<<<<<<<<<
*
* for i in prange(x_image_size, schedule='dynamic', nogil=True):
*/
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 417, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 417, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_intp); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 417, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_2) < 0) __PYX_ERR(0, 417, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "pysteps/motion/_vet.pyx":415
* dtype=np.intp)
*
* cdef np.ndarray[intp, ndim = 1] j_max = np.full(y_sectors, # <<<<<<<<<<<<<<
* y_image_size,
* dtype=np.intp)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 415, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 415, __pyx_L1_error)
__pyx_t_22 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer, (PyObject*)__pyx_t_22, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_intp, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_j_max = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 415, __pyx_L1_error)
} else {__pyx_pybuffernd_j_max.diminfo[0].strides = __pyx_pybuffernd_j_max.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_j_max.diminfo[0].shape = __pyx_pybuffernd_j_max.rcbuffer->pybuffer.shape[0];
}
}
__pyx_t_22 = 0;
__pyx_v_j_max = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
/* "pysteps/motion/_vet.pyx":419
* dtype=np.intp)
*
* for i in prange(x_image_size, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<<
*
* l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2)
*/
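/* Note (annotation): what follows is the expansion of the Cython
   prange: the GIL is released (Py_UNBLOCK_THREADS), the trip count is
   computed from start/stop/step (here 0 .. x_image_size, step 1), and
   the body is emitted as an OpenMP "parallel for" with
   schedule(dynamic). The loop-local variables (j, l0, l1, m0, m1,
   sector_area, xy) are declared lastprivate and pre-set to sentinel
   values (0xbad0bad0 / NaN) at the top of each iteration. */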
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_23 = __pyx_v_x_image_size;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_25 = (__pyx_t_23 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_25 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_100, __pyx_t_101, __pyx_t_102, __pyx_t_103, __pyx_t_104, __pyx_t_105, __pyx_t_106, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_44, __pyx_t_45, __pyx_t_46, __pyx_t_47, __pyx_t_48, __pyx_t_49, __pyx_t_50, __pyx_t_51, __pyx_t_52, __pyx_t_53, __pyx_t_54, __pyx_t_55, __pyx_t_56, __pyx_t_57, __pyx_t_58, __pyx_t_59, __pyx_t_60, __pyx_t_61, __pyx_t_62, __pyx_t_63, __pyx_t_64, __pyx_t_65, __pyx_t_66, __pyx_t_67, __pyx_t_68, __pyx_t_69, __pyx_t_70, __pyx_t_71, __pyx_t_72, __pyx_t_73, __pyx_t_74, __pyx_t_75, __pyx_t_76, __pyx_t_77, __pyx_t_78, __pyx_t_79, __pyx_t_80, __pyx_t_81, __pyx_t_82, __pyx_t_83, __pyx_t_84, __pyx_t_85, __pyx_t_86, __pyx_t_87, __pyx_t_88, __pyx_t_89, __pyx_t_90, __pyx_t_91, __pyx_t_92, __pyx_t_93, __pyx_t_94, __pyx_t_95, __pyx_t_96, __pyx_t_97, __pyx_t_98, __pyx_t_99)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_l0) lastprivate(__pyx_v_l1) lastprivate(__pyx_v_m0) lastprivate(__pyx_v_m1) lastprivate(__pyx_v_sector_area) lastprivate(__pyx_v_xy) schedule(dynamic)
#endif /* _OPENMP */
for (__pyx_t_24 = 0; __pyx_t_24 < __pyx_t_25; __pyx_t_24++){
{
__pyx_v_i = (__pyx_t_7pysteps_6motion_4_vet_intp)(0 + 1 * __pyx_t_24);
/* Initialize private variables to invalid values */
__pyx_v_j = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_l0 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_l1 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_m0 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_m1 = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
__pyx_v_sector_area = ((__pyx_t_7pysteps_6motion_4_vet_float64)__PYX_NAN());
__pyx_v_xy = ((__pyx_t_7pysteps_6motion_4_vet_intp)0xbad0bad0);
/* "pysteps/motion/_vet.pyx":421
* for i in prange(x_image_size, schedule='dynamic', nogil=True):
*
* l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2) # <<<<<<<<<<<<<<
* l0 = int_max(l0, 0)
* l1 = l0 + 1
*/
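/* Note (annotation): l0 and l1 = l0 + 1 bracket pixel row i between
   two sector centres; the int_min/int_max pair clamps l0 into
   [0, x_sectors - 2] so both indices stay inside x_guess. The .pyx
   "//" is emitted here as plain C "/" (cdivision appears to be
   enabled), i.e. truncating division, which agrees with Python floor
   division only while (i - i_shift) is non-negative. */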
__pyx_v_l0 = __pyx_f_7pysteps_6motion_4_vet_int_min(((__pyx_v_i - __pyx_v_i_shift) / __pyx_v_x_sector_size), (__pyx_v_x_sectors - 2));
/* "pysteps/motion/_vet.pyx":422
*
* l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2)
* l0 = int_max(l0, 0) # <<<<<<<<<<<<<<
* l1 = l0 + 1
*
*/
__pyx_v_l0 = __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_v_l0, 0);
/* "pysteps/motion/_vet.pyx":423
* l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2)
* l0 = int_max(l0, 0)
* l1 = l0 + 1 # <<<<<<<<<<<<<<
*
* l_i[i] = l0
*/
__pyx_v_l1 = (__pyx_v_l0 + 1);
/* "pysteps/motion/_vet.pyx":425
* l1 = l0 + 1
*
* l_i[i] = l0 # <<<<<<<<<<<<<<
*
* for j in range(y_image_size):
*/
__pyx_t_26 = __pyx_v_i;
*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_l_i.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_l_i.diminfo[0].strides) = __pyx_v_l0;
/* "pysteps/motion/_vet.pyx":427
* l_i[i] = l0
*
* for j in range(y_image_size): # <<<<<<<<<<<<<<
* m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2)
* m0 = int_max(m0, 0)
*/
__pyx_t_27 = __pyx_v_y_image_size;
__pyx_t_28 = __pyx_t_27;
for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) {
__pyx_v_j = __pyx_t_29;
/* "pysteps/motion/_vet.pyx":428
*
* for j in range(y_image_size):
* m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2) # <<<<<<<<<<<<<<
* m0 = int_max(m0, 0)
* m1 = m0 + 1
*/
__pyx_v_m0 = __pyx_f_7pysteps_6motion_4_vet_int_min(((__pyx_v_j - __pyx_v_j_shift) / __pyx_v_y_sector_size), (__pyx_v_y_sectors - 2));
/* "pysteps/motion/_vet.pyx":429
* for j in range(y_image_size):
* m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2)
* m0 = int_max(m0, 0) # <<<<<<<<<<<<<<
* m1 = m0 + 1
*
*/
__pyx_v_m0 = __pyx_f_7pysteps_6motion_4_vet_int_max(__pyx_v_m0, 0);
/* "pysteps/motion/_vet.pyx":430
* m0 = int_min((j - j_shift) // y_sector_size, y_sectors - 2)
* m0 = int_max(m0, 0)
* m1 = m0 + 1 # <<<<<<<<<<<<<<
*
* m_j[j] = m0
*/
__pyx_v_m1 = (__pyx_v_m0 + 1);
/* "pysteps/motion/_vet.pyx":432
* m1 = m0 + 1
*
* m_j[j] = m0 # <<<<<<<<<<<<<<
*
* sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0])
*/
__pyx_t_30 = __pyx_v_j;
*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_m_j.rcbuffer->pybuffer.buf, __pyx_t_30, __pyx_pybuffernd_m_j.diminfo[0].strides) = __pyx_v_m0;
/* "pysteps/motion/_vet.pyx":434
* m_j[j] = m0
*
* sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0]) # <<<<<<<<<<<<<<
*
* interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1]
*/
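/* Note (annotation): sector_area is the x-span times the y-span of the
   cell spanned by the four bracketing sector centres; it is the common
   denominator of the four bilinear weights computed next. */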
__pyx_t_31 = __pyx_v_l1;
__pyx_t_32 = __pyx_v_l0;
__pyx_t_33 = __pyx_v_m1;
__pyx_t_34 = __pyx_v_m0;
__pyx_v_sector_area = (((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_31, __pyx_pybuffernd_x_guess.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_32, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_y_guess.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_34, __pyx_pybuffernd_y_guess.diminfo[0].strides))));
/* "pysteps/motion/_vet.pyx":436
* sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0])
*
* interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] # <<<<<<<<<<<<<<
* - x[i] * y_guess[m1]
* - x_guess[l1] * y[j]
*/
__pyx_t_35 = __pyx_v_l1;
__pyx_t_36 = __pyx_v_m1;
/* "pysteps/motion/_vet.pyx":437
*
* interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1]
* - x[i] * y_guess[m1] # <<<<<<<<<<<<<<
* - x_guess[l1] * y[j]
* + x[i] * y[j]) / sector_area
*/
__pyx_t_37 = __pyx_v_i;
__pyx_t_38 = __pyx_v_m1;
/* "pysteps/motion/_vet.pyx":438
* interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1]
* - x[i] * y_guess[m1]
* - x_guess[l1] * y[j] # <<<<<<<<<<<<<<
* + x[i] * y[j]) / sector_area
*
*/
__pyx_t_39 = __pyx_v_l1;
__pyx_t_40 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":439
* - x[i] * y_guess[m1]
* - x_guess[l1] * y[j]
* + x[i] * y[j]) / sector_area # <<<<<<<<<<<<<<
*
* interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0]
*/
__pyx_t_41 = __pyx_v_i;
__pyx_t_42 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":436
* sector_area = (x_guess[l1] - x_guess[l0]) * (y_guess[m1] - y_guess[m0])
*
* interp_coef[0, i, j] = (x_guess[l1] * y_guess[m1] # <<<<<<<<<<<<<<
* - x[i] * y_guess[m1]
* - x_guess[l1] * y[j]
*/
__pyx_t_43 = 0;
__pyx_t_44 = __pyx_v_i;
__pyx_t_45 = __pyx_v_j;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_43, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_44, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_45, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_35, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_36, __pyx_pybuffernd_y_guess.diminfo[0].strides))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_37, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_38, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_39, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_40, __pyx_pybuffernd_y.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_41, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_42, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area);
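/* Note (annotation): writing x = x[i], y = y[j], x0 = x_guess[l0],
   x1 = x_guess[l1], y0 = y_guess[m0], y1 = y_guess[m1], the four-term
   numerator above factors as (x1 - x) * (y1 - y), so
   interp_coef[0] = (x1 - x)(y1 - y) / sector_area. The three
   assignments that follow compute the remaining bilinear weights
   (x1 - x)(y - y0), (x - x0)(y1 - y) and (x - x0)(y - y0), each over
   the same sector_area. */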
/* "pysteps/motion/_vet.pyx":441
* + x[i] * y[j]) / sector_area
*
* interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] # <<<<<<<<<<<<<<
* + x[i] * y_guess[m0]
* + x_guess[l1] * y[j]
*/
__pyx_t_46 = __pyx_v_l1;
__pyx_t_47 = __pyx_v_m0;
/* "pysteps/motion/_vet.pyx":442
*
* interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0]
* + x[i] * y_guess[m0] # <<<<<<<<<<<<<<
* + x_guess[l1] * y[j]
* - x[i] * y[j]) / sector_area
*/
__pyx_t_48 = __pyx_v_i;
__pyx_t_49 = __pyx_v_m0;
/* "pysteps/motion/_vet.pyx":443
* interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0]
* + x[i] * y_guess[m0]
* + x_guess[l1] * y[j] # <<<<<<<<<<<<<<
* - x[i] * y[j]) / sector_area
*
*/
__pyx_t_50 = __pyx_v_l1;
__pyx_t_51 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":444
* + x[i] * y_guess[m0]
* + x_guess[l1] * y[j]
* - x[i] * y[j]) / sector_area # <<<<<<<<<<<<<<
*
* interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1]
*/
__pyx_t_52 = __pyx_v_i;
__pyx_t_53 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":441
* + x[i] * y[j]) / sector_area
*
* interp_coef[1, i, j] = (-x_guess[l1] * y_guess[m0] # <<<<<<<<<<<<<<
* + x[i] * y_guess[m0]
* + x_guess[l1] * y[j]
*/
__pyx_t_54 = 1;
__pyx_t_55 = __pyx_v_i;
__pyx_t_56 = __pyx_v_j;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_54, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_55, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_56, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((-(*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_46, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_47, __pyx_pybuffernd_y_guess.diminfo[0].strides))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_48, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_49, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_50, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_51, __pyx_pybuffernd_y.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_52, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_53, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area);
/* "pysteps/motion/_vet.pyx":446
* - x[i] * y[j]) / sector_area
*
* interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] # <<<<<<<<<<<<<<
* + x[i] * y_guess[m1]
* + x_guess[l0] * y[j]
*/
__pyx_t_57 = __pyx_v_l0;
__pyx_t_58 = __pyx_v_m1;
/* "pysteps/motion/_vet.pyx":447
*
* interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1]
* + x[i] * y_guess[m1] # <<<<<<<<<<<<<<
* + x_guess[l0] * y[j]
* - x[i] * y[j]) / sector_area
*/
__pyx_t_59 = __pyx_v_i;
__pyx_t_60 = __pyx_v_m1;
/* "pysteps/motion/_vet.pyx":448
* interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1]
* + x[i] * y_guess[m1]
* + x_guess[l0] * y[j] # <<<<<<<<<<<<<<
* - x[i] * y[j]) / sector_area
*
*/
__pyx_t_61 = __pyx_v_l0;
__pyx_t_62 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":449
* + x[i] * y_guess[m1]
* + x_guess[l0] * y[j]
* - x[i] * y[j]) / sector_area # <<<<<<<<<<<<<<
*
* interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0]
*/
__pyx_t_63 = __pyx_v_i;
__pyx_t_64 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":446
* - x[i] * y[j]) / sector_area
*
* interp_coef[2, i, j] = (-x_guess[l0] * y_guess[m1] # <<<<<<<<<<<<<<
* + x[i] * y_guess[m1]
* + x_guess[l0] * y[j]
*/
__pyx_t_65 = 2;
__pyx_t_66 = __pyx_v_i;
__pyx_t_67 = __pyx_v_j;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_65, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_66, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_67, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((-(*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_57, __pyx_pybuffernd_x_guess.diminfo[0].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_58, __pyx_pybuffernd_y_guess.diminfo[0].strides))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_59, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_61, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_62, __pyx_pybuffernd_y.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_63, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_64, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area);
/* "pysteps/motion/_vet.pyx":451
* - x[i] * y[j]) / sector_area
*
* interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] # <<<<<<<<<<<<<<
* - x[i] * y_guess[m0]
* - x_guess[l0] * y[j]
*/
__pyx_t_68 = __pyx_v_l0;
__pyx_t_69 = __pyx_v_m0;
/* "pysteps/motion/_vet.pyx":452
*
* interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0]
* - x[i] * y_guess[m0] # <<<<<<<<<<<<<<
* - x_guess[l0] * y[j]
* + x[i] * y[j]) / sector_area
*/
__pyx_t_70 = __pyx_v_i;
__pyx_t_71 = __pyx_v_m0;
/* "pysteps/motion/_vet.pyx":453
* interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0]
* - x[i] * y_guess[m0]
* - x_guess[l0] * y[j] # <<<<<<<<<<<<<<
* + x[i] * y[j]) / sector_area
*
*/
__pyx_t_72 = __pyx_v_l0;
__pyx_t_73 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":454
* - x[i] * y_guess[m0]
* - x_guess[l0] * y[j]
* + x[i] * y[j]) / sector_area # <<<<<<<<<<<<<<
*
* for xy in range(2):
*/
__pyx_t_74 = __pyx_v_i;
__pyx_t_75 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":451
* - x[i] * y[j]) / sector_area
*
* interp_coef[3, i, j] = (x_guess[l0] * y_guess[m0] # <<<<<<<<<<<<<<
* - x[i] * y_guess[m0]
* - x_guess[l0] * y[j]
*/
__pyx_t_76 = 3;
__pyx_t_77 = __pyx_v_i;
__pyx_t_78 = __pyx_v_j;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_76, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_77, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_78, __pyx_pybuffernd_interp_coef.diminfo[2].strides) = ((((((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_68, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_69, __pyx_pybuffernd_y_guess.diminfo[0].strides))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_70, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y_guess.rcbuffer->pybuffer.buf, __pyx_t_71, __pyx_pybuffernd_y_guess.diminfo[0].strides)))) - ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x_guess.rcbuffer->pybuffer.buf, __pyx_t_72, __pyx_pybuffernd_x_guess.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_73, __pyx_pybuffernd_y.diminfo[0].strides)))) + ((*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_74, __pyx_pybuffernd_x.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_75, __pyx_pybuffernd_y.diminfo[0].strides)))) / __pyx_v_sector_area);
/* "pysteps/motion/_vet.pyx":456
* + x[i] * y[j]) / sector_area
*
* for xy in range(2): # <<<<<<<<<<<<<<
* displacement[xy, i, j] = (
* sector_displacement[xy, l0, m0] * interp_coef[0, i, j]
*/
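/* Note (annotation): each displacement component (xy = 0, 1) at pixel
   (i, j) is the weighted sum of the four surrounding coarse
   sector-displacement values. Since the four weights sum to 1 by
   construction ((x1-x)(y1-y) + (x1-x)(y-y0) + (x-x0)(y1-y) +
   (x-x0)(y-y0) = (x1-x0)(y1-y0) = sector_area), this is plain bilinear
   interpolation of the sector field onto the full image grid. */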
for (__pyx_t_79 = 0; __pyx_t_79 < 2; __pyx_t_79+=1) {
__pyx_v_xy = __pyx_t_79;
/* "pysteps/motion/_vet.pyx":458
* for xy in range(2):
* displacement[xy, i, j] = (
* sector_displacement[xy, l0, m0] * interp_coef[0, i, j] # <<<<<<<<<<<<<<
* + sector_displacement[xy, l0, m1] * interp_coef[1, i, j]
* + sector_displacement[xy, l1, m0] * interp_coef[2, i, j]
*/
__pyx_t_80 = __pyx_v_xy;
__pyx_t_81 = __pyx_v_l0;
__pyx_t_82 = __pyx_v_m0;
__pyx_t_83 = 0;
__pyx_t_84 = __pyx_v_i;
__pyx_t_85 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":459
* displacement[xy, i, j] = (
* sector_displacement[xy, l0, m0] * interp_coef[0, i, j]
* + sector_displacement[xy, l0, m1] * interp_coef[1, i, j] # <<<<<<<<<<<<<<
* + sector_displacement[xy, l1, m0] * interp_coef[2, i, j]
* + sector_displacement[xy, l1, m1] * interp_coef[3, i, j]
*/
__pyx_t_86 = __pyx_v_xy;
__pyx_t_87 = __pyx_v_l0;
__pyx_t_88 = __pyx_v_m1;
__pyx_t_89 = 1;
__pyx_t_90 = __pyx_v_i;
__pyx_t_91 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":460
* sector_displacement[xy, l0, m0] * interp_coef[0, i, j]
* + sector_displacement[xy, l0, m1] * interp_coef[1, i, j]
* + sector_displacement[xy, l1, m0] * interp_coef[2, i, j] # <<<<<<<<<<<<<<
* + sector_displacement[xy, l1, m1] * interp_coef[3, i, j]
* )
*/
__pyx_t_92 = __pyx_v_xy;
__pyx_t_93 = __pyx_v_l1;
__pyx_t_94 = __pyx_v_m0;
__pyx_t_95 = 2;
__pyx_t_96 = __pyx_v_i;
__pyx_t_97 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":461
* + sector_displacement[xy, l0, m1] * interp_coef[1, i, j]
* + sector_displacement[xy, l1, m0] * interp_coef[2, i, j]
* + sector_displacement[xy, l1, m1] * interp_coef[3, i, j] # <<<<<<<<<<<<<<
* )
*
*/
__pyx_t_98 = __pyx_v_xy;
__pyx_t_99 = __pyx_v_l1;
__pyx_t_100 = __pyx_v_m1;
__pyx_t_101 = 3;
__pyx_t_102 = __pyx_v_i;
__pyx_t_103 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":457
*
* for xy in range(2):
* displacement[xy, i, j] = ( # <<<<<<<<<<<<<<
* sector_displacement[xy, l0, m0] * interp_coef[0, i, j]
* + sector_displacement[xy, l0, m1] * interp_coef[1, i, j]
*/
__pyx_t_104 = __pyx_v_xy;
__pyx_t_105 = __pyx_v_i;
__pyx_t_106 = __pyx_v_j;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_displacement.rcbuffer->pybuffer.buf, __pyx_t_104, __pyx_pybuffernd_displacement.diminfo[0].strides, __pyx_t_105, __pyx_pybuffernd_displacement.diminfo[1].strides, __pyx_t_106, __pyx_pybuffernd_displacement.diminfo[2].strides) = (((((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_80, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_81, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_82, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_83, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_84, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_85, __pyx_pybuffernd_interp_coef.diminfo[2].strides))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_86, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_87, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_88, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_89, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_90, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_91, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_92, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_93, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_94, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_95, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_96, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_97, __pyx_pybuffernd_interp_coef.diminfo[2].strides)))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_98, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_99, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_100, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_101, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_102, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_103, __pyx_pybuffernd_interp_coef.diminfo[2].strides))));
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "pysteps/motion/_vet.pyx":419
* dtype=np.intp)
*
* for i in prange(x_image_size, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<<
*
* l0 = int_min((i - i_shift) // x_sector_size, x_sectors - 2)
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L7;
}
__pyx_L7:;
}
}
/* "pysteps/motion/_vet.pyx":464
* )
*
* for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<<
* return_index=True,
* return_counts=True)):
*/
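/* Note (annotation): np.unique(l_i, return_index=True,
   return_counts=True) returns, for each distinct sector index l, the
   position of its first occurrence and the number of occurrences.
   Because l_i is non-decreasing in i (l0 grows with i), this yields the
   first and last pixel row of each sector: per the .pyx source,
   i_min[l] = i and i_max[l] = i + counts - 1. */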
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_unique); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_l_i));
__Pyx_GIVEREF(((PyObject *)__pyx_v_l_i));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_l_i));
/* "pysteps/motion/_vet.pyx":465
*
* for l, i, counts in zip(*np.unique(l_i,
* return_index=True, # <<<<<<<<<<<<<<
* return_counts=True)):
* i_min[l] = i
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 465, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return_index, Py_True) < 0) __PYX_ERR(0, 465, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":466
* for l, i, counts in zip(*np.unique(l_i,
* return_index=True,
* return_counts=True)): # <<<<<<<<<<<<<<
* i_min[l] = i
* i_max[l] = i + counts - 1
*/
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return_counts, Py_True) < 0) __PYX_ERR(0, 465, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":464
* )
*
* for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<<
* return_index=True,
* return_counts=True)):
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PySequence_Tuple(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (likely(PyList_CheckExact(__pyx_t_4)) || PyTuple_CheckExact(__pyx_t_4)) {
__pyx_t_3 = __pyx_t_4; __Pyx_INCREF(__pyx_t_3); __pyx_t_107 = 0;
__pyx_t_108 = NULL;
} else {
__pyx_t_107 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_108 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_108)) __PYX_ERR(0, 464, __pyx_L1_error)
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
for (;;) {
if (likely(!__pyx_t_108)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_107 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_107); __Pyx_INCREF(__pyx_t_4); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 464, __pyx_L1_error)
#else
__pyx_t_4 = PySequence_ITEM(__pyx_t_3, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
if (__pyx_t_107 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_107); __Pyx_INCREF(__pyx_t_4); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 464, __pyx_L1_error)
#else
__pyx_t_4 = PySequence_ITEM(__pyx_t_3, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
}
} else {
__pyx_t_4 = __pyx_t_108(__pyx_t_3);
if (unlikely(!__pyx_t_4)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 464, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_4);
}
if ((likely(PyTuple_CheckExact(__pyx_t_4))) || (PyList_CheckExact(__pyx_t_4))) {
PyObject* sequence = __pyx_t_4;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 3)) {
if (size > 3) __Pyx_RaiseTooManyValuesError(3);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 464, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__pyx_t_6 = PyTuple_GET_ITEM(sequence, 2);
} else {
__pyx_t_2 = PyList_GET_ITEM(sequence, 0);
__pyx_t_5 = PyList_GET_ITEM(sequence, 1);
__pyx_t_6 = PyList_GET_ITEM(sequence, 2);
}
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
#endif
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_109 = PyObject_GetIter(__pyx_t_4); if (unlikely(!__pyx_t_109)) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_109);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_110 = Py_TYPE(__pyx_t_109)->tp_iternext;
index = 0; __pyx_t_2 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_2)) goto __pyx_L20_unpacking_failed;
__Pyx_GOTREF(__pyx_t_2);
index = 1; __pyx_t_5 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_5)) goto __pyx_L20_unpacking_failed;
__Pyx_GOTREF(__pyx_t_5);
index = 2; __pyx_t_6 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_6)) goto __pyx_L20_unpacking_failed;
__Pyx_GOTREF(__pyx_t_6);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_109), 3) < 0) __PYX_ERR(0, 464, __pyx_L1_error)
__pyx_t_110 = NULL;
__Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0;
goto __pyx_L21_unpacking_done;
__pyx_L20_unpacking_failed:;
__Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0;
__pyx_t_110 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 464, __pyx_L1_error)
__pyx_L21_unpacking_done:;
}
__pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_5); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 464, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_l = __pyx_t_25;
__pyx_v_i = __pyx_t_24;
__Pyx_XDECREF_SET(__pyx_v_counts, __pyx_t_6);
__pyx_t_6 = 0;
/* "pysteps/motion/_vet.pyx":467
* return_index=True,
* return_counts=True)):
* i_min[l] = i # <<<<<<<<<<<<<<
* i_max[l] = i + counts - 1
*
*/
__pyx_t_111 = __pyx_v_l;
*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_111, __pyx_pybuffernd_i_min.diminfo[0].strides) = __pyx_v_i;
/* "pysteps/motion/_vet.pyx":468
* return_counts=True)):
* i_min[l] = i
* i_max[l] = i + counts - 1 # <<<<<<<<<<<<<<
*
* for m, j, counts in zip(*np.unique(m_j,
*/
__pyx_t_4 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_i); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 468, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_4, __pyx_v_counts); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 468, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyInt_SubtractObjC(__pyx_t_6, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 468, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_4); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 468, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_112 = __pyx_v_l;
*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_112, __pyx_pybuffernd_i_max.diminfo[0].strides) = __pyx_t_24;
/* "pysteps/motion/_vet.pyx":464
* )
*
* for l, i, counts in zip(*np.unique(l_i, # <<<<<<<<<<<<<<
* return_index=True,
* return_counts=True)):
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
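/* Note: the loop above (_vet.pyx:464-468) converts the flat sector-label
 * array l_i into per-sector row ranges.  A minimal NumPy sketch of the
 * same bookkeeping (array contents are hypothetical, and it assumes equal
 * labels form contiguous runs in l_i, which is what makes first + counts
 * a run boundary):
 *
 *     import numpy as np
 *     l_i = np.array([0, 0, 0, 1, 1, 2])        # illustrative labels only
 *     labels, first, counts = np.unique(l_i, return_index=True,
 *                                       return_counts=True)
 *     i_min = np.zeros(l_i.max() + 1, dtype=np.intp)
 *     i_max = np.zeros_like(i_min)
 *     i_min[labels] = first                     # first row of each sector
 *     i_max[labels] = first + counts - 1        # last row, inclusive
 */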
/* "pysteps/motion/_vet.pyx":470
* i_max[l] = i + counts - 1
*
* for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<<
* return_index=True,
* return_counts=True)):
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_unique); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_m_j));
__Pyx_GIVEREF(((PyObject *)__pyx_v_m_j));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_m_j));
/* "pysteps/motion/_vet.pyx":471
*
* for m, j, counts in zip(*np.unique(m_j,
* return_index=True, # <<<<<<<<<<<<<<
* return_counts=True)):
* j_min[m] = j
*/
__pyx_t_6 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 471, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_return_index, Py_True) < 0) __PYX_ERR(0, 471, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":472
* for m, j, counts in zip(*np.unique(m_j,
* return_index=True,
* return_counts=True)): # <<<<<<<<<<<<<<
* j_min[m] = j
* j_max[m] = j + counts
*/
if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_return_counts, Py_True) < 0) __PYX_ERR(0, 471, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":470
* i_max[l] = i + counts - 1
*
* for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<<
* return_index=True,
* return_counts=True)):
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PySequence_Tuple(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_zip, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_t_5)) || PyTuple_CheckExact(__pyx_t_5)) {
__pyx_t_6 = __pyx_t_5; __Pyx_INCREF(__pyx_t_6); __pyx_t_107 = 0;
__pyx_t_108 = NULL;
} else {
__pyx_t_107 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_108 = Py_TYPE(__pyx_t_6)->tp_iternext; if (unlikely(!__pyx_t_108)) __PYX_ERR(0, 470, __pyx_L1_error)
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
for (;;) {
if (likely(!__pyx_t_108)) {
if (likely(PyList_CheckExact(__pyx_t_6))) {
if (__pyx_t_107 >= PyList_GET_SIZE(__pyx_t_6)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_6, __pyx_t_107); __Pyx_INCREF(__pyx_t_5); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 470, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_107 >= PyTuple_GET_SIZE(__pyx_t_6)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_107); __Pyx_INCREF(__pyx_t_5); __pyx_t_107++; if (unlikely(0 < 0)) __PYX_ERR(0, 470, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_107); __pyx_t_107++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_108(__pyx_t_6);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 470, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) {
PyObject* sequence = __pyx_t_5;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 3)) {
if (size > 3) __Pyx_RaiseTooManyValuesError(3);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 470, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 2);
} else {
__pyx_t_3 = PyList_GET_ITEM(sequence, 0);
__pyx_t_4 = PyList_GET_ITEM(sequence, 1);
__pyx_t_2 = PyList_GET_ITEM(sequence, 2);
}
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_2);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
#endif
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_109 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_109)) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_109);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_110 = Py_TYPE(__pyx_t_109)->tp_iternext;
index = 0; __pyx_t_3 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_3)) goto __pyx_L24_unpacking_failed;
__Pyx_GOTREF(__pyx_t_3);
index = 1; __pyx_t_4 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_4)) goto __pyx_L24_unpacking_failed;
__Pyx_GOTREF(__pyx_t_4);
index = 2; __pyx_t_2 = __pyx_t_110(__pyx_t_109); if (unlikely(!__pyx_t_2)) goto __pyx_L24_unpacking_failed;
__Pyx_GOTREF(__pyx_t_2);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_109), 3) < 0) __PYX_ERR(0, 470, __pyx_L1_error)
__pyx_t_110 = NULL;
__Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0;
goto __pyx_L25_unpacking_done;
__pyx_L24_unpacking_failed:;
__Pyx_DECREF(__pyx_t_109); __pyx_t_109 = 0;
__pyx_t_110 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 470, __pyx_L1_error)
__pyx_L25_unpacking_done:;
}
__pyx_t_24 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_3); if (unlikely((__pyx_t_24 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_4); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 470, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_m = __pyx_t_24;
__pyx_v_j = __pyx_t_25;
__Pyx_XDECREF_SET(__pyx_v_counts, __pyx_t_2);
__pyx_t_2 = 0;
/* "pysteps/motion/_vet.pyx":473
* return_index=True,
* return_counts=True)):
* j_min[m] = j # <<<<<<<<<<<<<<
* j_max[m] = j + counts
*
*/
__pyx_t_113 = __pyx_v_m;
*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_113, __pyx_pybuffernd_j_min.diminfo[0].strides) = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":474
* return_counts=True)):
* j_min[m] = j
* j_max[m] = j + counts # <<<<<<<<<<<<<<
*
* cdef np.ndarray[float64, ndim = 2] morphed_image
*/
__pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_j); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 474, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_2 = PyNumber_Add(__pyx_t_5, __pyx_v_counts); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 474, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_25 = __Pyx_PyInt_As_Py_intptr_t(__pyx_t_2); if (unlikely((__pyx_t_25 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 474, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_114 = __pyx_v_m;
*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_114, __pyx_pybuffernd_j_max.diminfo[0].strides) = __pyx_t_25;
/* "pysteps/motion/_vet.pyx":470
* i_max[l] = i + counts - 1
*
* for m, j, counts in zip(*np.unique(m_j, # <<<<<<<<<<<<<<
* return_index=True,
* return_counts=True)):
*/
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
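/* Note the asymmetry between the two loops: i_max[l] = i + counts - 1
 * stores an inclusive bound, while j_max[m] = j + counts stores an
 * exclusive one.  Because the accumulation loops below iterate
 * range(i_min[l], i_max[l]) and range(j_min[m], j_max[m]), the row loops
 * stop one row short of each sector while the column loops cover the full
 * run.  Whether that is intended cannot be settled from this file alone;
 * the `# TODO: Check this line!` at _vet.pyx:527 suggests this part of the
 * code was still under review. */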
/* "pysteps/motion/_vet.pyx":483
*
* cdef np.ndarray[float64, ndim = 2] buffer = \
* np.zeros([x_image_size, y_image_size], dtype=np.float64) # <<<<<<<<<<<<<<
*
* grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64)
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_image_size); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_image_size); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_6);
PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5);
PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_5);
__pyx_t_6 = 0;
__pyx_t_5 = 0;
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 483, __pyx_L1_error)
__pyx_t_115 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_t_115, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
__pyx_v_buffer = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_buffer.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 482, __pyx_L1_error)
} else {__pyx_pybuffernd_buffer.diminfo[0].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_buffer.diminfo[0].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_buffer.diminfo[1].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_buffer.diminfo[1].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_115 = 0;
__pyx_v_buffer = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
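/* Note: `buffer` is bound for the first time here, so the failure path
 * above has no previous buffer to fall back to.  It resets the variable to
 * Py_None instead of calling __Pyx_RaiseBufferFallbackError() as the
 * rebinding sites further down do, and reports the error against .pyx
 * line 482 (the cdef declaration) rather than the np.zeros call itself. */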
/* "pysteps/motion/_vet.pyx":485
* np.zeros([x_image_size, y_image_size], dtype=np.float64)
*
* grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) # <<<<<<<<<<<<<<
*
* grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64)
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_2 = PyList_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_int_2);
__Pyx_GIVEREF(__pyx_t_3);
PyList_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_5);
PyList_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__pyx_t_3 = 0;
__pyx_t_5 = 0;
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float64); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 485, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 485, __pyx_L1_error)
__pyx_t_116 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_t_116, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_smooth, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10);
}
__pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_grad_smooth.diminfo[0].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_grad_smooth.diminfo[0].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_smooth.diminfo[1].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_smooth.diminfo[1].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_smooth.diminfo[2].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_smooth.diminfo[2].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[2];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 485, __pyx_L1_error)
}
__pyx_t_116 = 0;
__pyx_v_grad_smooth = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "pysteps/motion/_vet.pyx":487
* grad_smooth = np.zeros([2, x_sectors, y_sectors], dtype=np.float64)
*
* grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64) # <<<<<<<<<<<<<<
*
* cdef float64 residuals = 0
*/
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_x_sectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = __Pyx_PyInt_From_Py_intptr_t(__pyx_v_y_sectors); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = PyList_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_2);
__Pyx_GIVEREF(__pyx_t_6);
PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5);
PyList_SET_ITEM(__pyx_t_4, 2, __pyx_t_5);
__pyx_t_6 = 0;
__pyx_t_5 = 0;
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_float64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 487, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 487, __pyx_L1_error)
__pyx_t_117 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer, (PyObject*)__pyx_t_117, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_residuals, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_grad_residuals.diminfo[0].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_grad_residuals.diminfo[0].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_residuals.diminfo[1].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_residuals.diminfo[1].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_residuals.diminfo[2].strides = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_residuals.diminfo[2].shape = __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.shape[2];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 487, __pyx_L1_error)
}
__pyx_t_117 = 0;
__pyx_v_grad_residuals = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
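/* Note: three work arrays now exist: `buffer` (x_image_size by
 * y_image_size, float64) plus the accumulators `grad_smooth` and
 * `grad_residuals`, each of shape [2, x_sectors, y_sectors], one plane per
 * displacement component.  Both accumulators come from np.zeros, so the
 * += updates further down start from zero. */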
/* "pysteps/motion/_vet.pyx":489
* grad_residuals = np.zeros([2, x_sectors, y_sectors], dtype=np.float64)
*
* cdef float64 residuals = 0 # <<<<<<<<<<<<<<
*
* # Compute residual part of the cost function
*/
__pyx_v_residuals = 0.0;
/* "pysteps/motion/_vet.pyx":492
*
* # Compute residual part of the cost function
* if gradient: # <<<<<<<<<<<<<<
*
* morphed_image, morph_mask, _gradient_data = _warp(template_image,
*/
__pyx_t_1 = (__pyx_v_gradient != 0);
if (__pyx_t_1) {
/* "pysteps/motion/_vet.pyx":494
* if gradient:
*
* morphed_image, morph_mask, _gradient_data = _warp(template_image, # <<<<<<<<<<<<<<
* mask,
* displacement,
*/
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_warp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 494, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "pysteps/motion/_vet.pyx":496
* morphed_image, morph_mask, _gradient_data = _warp(template_image,
* mask,
* displacement, # <<<<<<<<<<<<<<
* gradient=True)
*
*/
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 494, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_INCREF(((PyObject *)__pyx_v_template_image));
__Pyx_GIVEREF(((PyObject *)__pyx_v_template_image));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_v_template_image));
__Pyx_INCREF(((PyObject *)__pyx_v_mask));
__Pyx_GIVEREF(((PyObject *)__pyx_v_mask));
PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_mask));
__Pyx_INCREF(((PyObject *)__pyx_v_displacement));
__Pyx_GIVEREF(((PyObject *)__pyx_v_displacement));
PyTuple_SET_ITEM(__pyx_t_4, 2, ((PyObject *)__pyx_v_displacement));
/* "pysteps/motion/_vet.pyx":497
* mask,
* displacement,
* gradient=True) # <<<<<<<<<<<<<<
*
* morph_mask[mask > 0] = 1
*/
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 497, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_gradient, Py_True) < 0) __PYX_ERR(0, 497, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":494
* if gradient:
*
* morphed_image, morph_mask, _gradient_data = _warp(template_image, # <<<<<<<<<<<<<<
* mask,
* displacement,
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 494, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 3)) {
if (size > 3) __Pyx_RaiseTooManyValuesError(3);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 494, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 2);
} else {
__pyx_t_5 = PyList_GET_ITEM(sequence, 0);
__pyx_t_4 = PyList_GET_ITEM(sequence, 1);
__pyx_t_3 = PyList_GET_ITEM(sequence, 2);
}
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 494, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 494, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 494, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_6 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 494, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_110 = Py_TYPE(__pyx_t_6)->tp_iternext;
index = 0; __pyx_t_5 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L27_unpacking_failed;
__Pyx_GOTREF(__pyx_t_5);
index = 1; __pyx_t_4 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_4)) goto __pyx_L27_unpacking_failed;
__Pyx_GOTREF(__pyx_t_4);
index = 2; __pyx_t_3 = __pyx_t_110(__pyx_t_6); if (unlikely(!__pyx_t_3)) goto __pyx_L27_unpacking_failed;
__Pyx_GOTREF(__pyx_t_3);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_6), 3) < 0) __PYX_ERR(0, 494, __pyx_L1_error)
__pyx_t_110 = NULL;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
goto __pyx_L28_unpacking_done;
__pyx_L27_unpacking_failed:;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_110 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 494, __pyx_L1_error)
__pyx_L28_unpacking_done:;
}
if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error)
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error)
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 494, __pyx_L1_error)
__pyx_t_118 = ((PyArrayObject *)__pyx_t_5);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_118, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_morphed_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10);
}
__pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_morphed_image.diminfo[0].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_image.diminfo[0].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_image.diminfo[1].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_image.diminfo[1].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error)
}
__pyx_t_118 = 0;
__pyx_v_morphed_image = ((PyArrayObject *)__pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_119 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_119, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_morph_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_morph_mask.diminfo[0].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morph_mask.diminfo[0].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morph_mask.diminfo[1].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morph_mask.diminfo[1].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error)
}
__pyx_t_119 = 0;
__pyx_v_morph_mask = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_120 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer, (PyObject*)__pyx_t_120, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer, (PyObject*)__pyx_v__gradient_data, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10);
}
__pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0;
}
__pyx_pybuffernd__gradient_data.diminfo[0].strides = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd__gradient_data.diminfo[0].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd__gradient_data.diminfo[1].strides = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd__gradient_data.diminfo[1].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd__gradient_data.diminfo[2].strides = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd__gradient_data.diminfo[2].shape = __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.shape[2];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 494, __pyx_L1_error)
}
__pyx_t_120 = 0;
__pyx_v__gradient_data = ((PyArrayObject *)__pyx_t_3);
__pyx_t_3 = 0;
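/* Note: from the buffer validations above, _warp(template_image, mask,
 * displacement, gradient=True) returns a 3-tuple: a 2-D float64 morphed
 * image, a 2-D int8 morph mask, and a 3-D float64 gradient array,
 * presumably the warped image's derivatives with respect to the two
 * displacement components stacked on axis 0.  That reading is inferred
 * from how _gradient_data[0, :] and _gradient_data[1, :] are consumed
 * below, not from any docstring visible in this file. */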
/* "pysteps/motion/_vet.pyx":499
* gradient=True)
*
* morph_mask[mask > 0] = 1 # <<<<<<<<<<<<<<
*
* buffer = (2 * (input_image - morphed_image))
*/
__pyx_t_2 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 499, __pyx_L1_error)
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morph_mask), __pyx_t_2, __pyx_int_1) < 0)) __PYX_ERR(0, 499, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "pysteps/motion/_vet.pyx":501
* morph_mask[mask > 0] = 1
*
* buffer = (2 * (input_image - morphed_image)) # <<<<<<<<<<<<<<
* buffer[morph_mask == 1] = 0
*
*/
__pyx_t_2 = PyNumber_Subtract(((PyObject *)__pyx_v_input_image), ((PyObject *)__pyx_v_morphed_image)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_int_2, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 501, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 501, __pyx_L1_error)
__pyx_t_115 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_t_115, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer, (PyObject*)__pyx_v_buffer, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_buffer.diminfo[0].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_buffer.diminfo[0].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_buffer.diminfo[1].strides = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_buffer.diminfo[1].shape = __pyx_pybuffernd_buffer.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 501, __pyx_L1_error)
}
__pyx_t_115 = 0;
__Pyx_DECREF_SET(__pyx_v_buffer, ((PyArrayObject *)__pyx_t_3));
__pyx_t_3 = 0;
/* "pysteps/motion/_vet.pyx":502
*
* buffer = (2 * (input_image - morphed_image))
* buffer[morph_mask == 1] = 0 # <<<<<<<<<<<<<<
*
* _gradient_data[0, :] *= buffer
*/
__pyx_t_3 = PyObject_RichCompare(((PyObject *)__pyx_v_morph_mask), __pyx_int_1, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 502, __pyx_L1_error)
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_buffer), __pyx_t_3, __pyx_int_0) < 0)) __PYX_ERR(0, 502, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "pysteps/motion/_vet.pyx":504
* buffer[morph_mask == 1] = 0
*
* _gradient_data[0, :] *= buffer # <<<<<<<<<<<<<<
* _gradient_data[1, :] *= buffer
*
*/
__Pyx_INCREF(__pyx_tuple__4);
__pyx_t_121 = __pyx_tuple__4;
__pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyNumber_InPlaceMultiply(__pyx_t_3, ((PyObject *)__pyx_v_buffer)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121, __pyx_t_2) < 0)) __PYX_ERR(0, 504, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_121); __pyx_t_121 = 0;
/* "pysteps/motion/_vet.pyx":505
*
* _gradient_data[0, :] *= buffer
* _gradient_data[1, :] *= buffer # <<<<<<<<<<<<<<
*
* for l in range(x_sectors): # schedule='dynamic', nogil=True):
*/
__Pyx_INCREF(__pyx_tuple__5);
__pyx_t_121 = __pyx_tuple__5;
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, ((PyObject *)__pyx_v_buffer)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v__gradient_data), __pyx_t_121, __pyx_t_3) < 0)) __PYX_ERR(0, 505, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_121); __pyx_t_121 = 0;
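/* Note: _vet.pyx:499-505 build the data term of the gradient.  For a
 * residual J = sum_ij (input_ij - morphed_ij)^2 the derivative with
 * respect to the morphed image is -2*(input - morphed); the code stores
 * 2*(input - morphed) in `buffer` (sign conventions follow the code),
 * zeroes it wherever the morph mask flags a pixel (including every pixel
 * where the input mask is positive, which .pyx:499 forces to 1), and then
 * folds it into both planes of _gradient_data, completing the chain rule
 * from image residuals to displacement components. */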
/* "pysteps/motion/_vet.pyx":507
* _gradient_data[1, :] *= buffer
*
* for l in range(x_sectors): # schedule='dynamic', nogil=True): # <<<<<<<<<<<<<<
* for m in range(y_sectors):
* for i in range(i_min[l], i_max[l]):
*/
__pyx_t_25 = __pyx_v_x_sectors;
__pyx_t_24 = __pyx_t_25;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_24; __pyx_t_23+=1) {
__pyx_v_l = __pyx_t_23;
/* "pysteps/motion/_vet.pyx":508
*
* for l in range(x_sectors): # schedule='dynamic', nogil=True):
* for m in range(y_sectors): # <<<<<<<<<<<<<<
* for i in range(i_min[l], i_max[l]):
* for j in range(j_min[m], j_max[m]):
*/
__pyx_t_27 = __pyx_v_y_sectors;
__pyx_t_28 = __pyx_t_27;
for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) {
__pyx_v_m = __pyx_t_29;
/* "pysteps/motion/_vet.pyx":509
* for l in range(x_sectors): # schedule='dynamic', nogil=True):
* for m in range(y_sectors):
* for i in range(i_min[l], i_max[l]): # <<<<<<<<<<<<<<
* for j in range(j_min[m], j_max[m]):
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \
*/
__pyx_t_122 = __pyx_v_l;
__pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_122, __pyx_pybuffernd_i_max.diminfo[0].strides));
__pyx_t_123 = __pyx_v_l;
__pyx_t_124 = __pyx_t_79;
for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_123, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) {
__pyx_v_i = __pyx_t_125;
/* "pysteps/motion/_vet.pyx":510
* for m in range(y_sectors):
* for i in range(i_min[l], i_max[l]):
* for j in range(j_min[m], j_max[m]): # <<<<<<<<<<<<<<
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \
* (_gradient_data[0, i, j]
*/
__pyx_t_126 = __pyx_v_m;
__pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_126, __pyx_pybuffernd_j_max.diminfo[0].strides));
__pyx_t_128 = __pyx_v_m;
__pyx_t_129 = __pyx_t_127;
for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_128, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) {
__pyx_v_j = __pyx_t_130;
/* "pysteps/motion/_vet.pyx":511
* for i in range(i_min[l], i_max[l]):
* for j in range(j_min[m], j_max[m]):
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<<
* (_gradient_data[0, i, j]
* * interp_coef[0, i, j])
*/
__pyx_t_131 = 0;
__pyx_t_132 = __pyx_v_l;
__pyx_t_133 = __pyx_v_m;
/* "pysteps/motion/_vet.pyx":512
* for j in range(j_min[m], j_max[m]):
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \
* (_gradient_data[0, i, j] # <<<<<<<<<<<<<<
* * interp_coef[0, i, j])
*
*/
__pyx_t_134 = 0;
__pyx_t_135 = __pyx_v_i;
__pyx_t_136 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":513
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \
* (_gradient_data[0, i, j]
* * interp_coef[0, i, j]) # <<<<<<<<<<<<<<
*
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \
*/
__pyx_t_137 = 0;
__pyx_t_138 = __pyx_v_i;
__pyx_t_139 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":511
* for i in range(i_min[l], i_max[l]):
* for j in range(j_min[m], j_max[m]):
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<<
* (_gradient_data[0, i, j]
* * interp_coef[0, i, j])
*/
__pyx_t_140 = 0;
__pyx_t_141 = __pyx_v_l;
__pyx_t_142 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_140, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_141, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_142, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_131, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_132, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_133, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_134, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_135, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_136, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_137, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_138, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_139, __pyx_pybuffernd_interp_coef.diminfo[2].strides))));
/* "pysteps/motion/_vet.pyx":515
* * interp_coef[0, i, j])
*
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<<
* (_gradient_data[1, i, j]
* * interp_coef[0, i, j])
*/
__pyx_t_143 = 1;
__pyx_t_144 = __pyx_v_l;
__pyx_t_145 = __pyx_v_m;
/* "pysteps/motion/_vet.pyx":516
*
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \
* (_gradient_data[1, i, j] # <<<<<<<<<<<<<<
* * interp_coef[0, i, j])
*
*/
__pyx_t_146 = 1;
__pyx_t_147 = __pyx_v_i;
__pyx_t_148 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":517
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \
* (_gradient_data[1, i, j]
* * interp_coef[0, i, j]) # <<<<<<<<<<<<<<
*
* for m in range(1, y_sectors):
*/
__pyx_t_149 = 0;
__pyx_t_150 = __pyx_v_i;
__pyx_t_151 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":515
* * interp_coef[0, i, j])
*
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<<
* (_gradient_data[1, i, j]
* * interp_coef[0, i, j])
*/
__pyx_t_152 = 1;
__pyx_t_153 = __pyx_v_l;
__pyx_t_154 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_152, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_153, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_154, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_143, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_144, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_145, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_146, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_147, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_148, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_149, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_150, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_151, __pyx_pybuffernd_interp_coef.diminfo[2].strides))));
}
}
}
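/* Note: the quadruple loop above (_vet.pyx:507-517) accumulates, for each
 * sector (l, m), the product of the warp gradient and the first
 * interpolation-weight plane over that sector's own pixel block.  Cython
 * hoists the bounds i_max[l] and j_max[m] into temporaries (__pyx_t_79,
 * __pyx_t_127, ...) before entering each loop, matching Python's rule
 * that range() evaluates its arguments once.  A vectorized NumPy
 * equivalent of this first nest, reusing the i_min/i_max/j_min/j_max
 * arrays from the sketch further up (shapes hypothetical):
 *
 *     for l in range(x_sectors):
 *         for m in range(y_sectors):
 *             i_sl = slice(i_min[l], i_max[l])
 *             j_sl = slice(j_min[m], j_max[m])
 *             block = _gradient_data[:, i_sl, j_sl] * interp_coef[0, i_sl, j_sl]
 *             grad_residuals[:, l, m] += block.sum(axis=(1, 2))
 */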
/* "pysteps/motion/_vet.pyx":519
* * interp_coef[0, i, j])
*
* for m in range(1, y_sectors): # <<<<<<<<<<<<<<
* for i in range(i_min[l], i_max[l]):
* for j in range(j_min[m - 1], j_max[m - 1]):
*/
__pyx_t_27 = __pyx_v_y_sectors;
__pyx_t_28 = __pyx_t_27;
for (__pyx_t_29 = 1; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) {
__pyx_v_m = __pyx_t_29;
/* "pysteps/motion/_vet.pyx":520
*
* for m in range(1, y_sectors):
* for i in range(i_min[l], i_max[l]): # <<<<<<<<<<<<<<
* for j in range(j_min[m - 1], j_max[m - 1]):
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \
*/
__pyx_t_155 = __pyx_v_l;
__pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_155, __pyx_pybuffernd_i_max.diminfo[0].strides));
__pyx_t_156 = __pyx_v_l;
__pyx_t_124 = __pyx_t_79;
for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_156, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) {
__pyx_v_i = __pyx_t_125;
/* "pysteps/motion/_vet.pyx":521
* for m in range(1, y_sectors):
* for i in range(i_min[l], i_max[l]):
* for j in range(j_min[m - 1], j_max[m - 1]): # <<<<<<<<<<<<<<
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \
* (_gradient_data[0, i, j]
*/
__pyx_t_157 = (__pyx_v_m - 1);
__pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_157, __pyx_pybuffernd_j_max.diminfo[0].strides));
__pyx_t_158 = (__pyx_v_m - 1);
__pyx_t_129 = __pyx_t_127;
for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_158, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) {
__pyx_v_j = __pyx_t_130;
/* "pysteps/motion/_vet.pyx":522
* for i in range(i_min[l], i_max[l]):
* for j in range(j_min[m - 1], j_max[m - 1]):
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<<
* (_gradient_data[0, i, j]
* * interp_coef[1, i, j])
*/
__pyx_t_159 = 0;
__pyx_t_160 = __pyx_v_l;
__pyx_t_161 = __pyx_v_m;
/* "pysteps/motion/_vet.pyx":523
* for j in range(j_min[m - 1], j_max[m - 1]):
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \
* (_gradient_data[0, i, j] # <<<<<<<<<<<<<<
* * interp_coef[1, i, j])
*
*/
__pyx_t_162 = 0;
__pyx_t_163 = __pyx_v_i;
__pyx_t_164 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":524
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \
* (_gradient_data[0, i, j]
* * interp_coef[1, i, j]) # <<<<<<<<<<<<<<
*
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \
*/
__pyx_t_165 = 1;
__pyx_t_166 = __pyx_v_i;
__pyx_t_167 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":522
* for i in range(i_min[l], i_max[l]):
* for j in range(j_min[m - 1], j_max[m - 1]):
* grad_residuals[0, l, m] = grad_residuals[0, l, m] + \ # <<<<<<<<<<<<<<
* (_gradient_data[0, i, j]
* * interp_coef[1, i, j])
*/
__pyx_t_168 = 0;
__pyx_t_169 = __pyx_v_l;
__pyx_t_170 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_168, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_169, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_170, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_159, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_160, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_161, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_162, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_163, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_164, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_165, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_166, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_167, __pyx_pybuffernd_interp_coef.diminfo[2].strides))));
/* "pysteps/motion/_vet.pyx":526
* * interp_coef[1, i, j])
*
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<<
* (_gradient_data[1, i, j] # TODO: Check this line!
* * interp_coef[1, i, j])
*/
__pyx_t_171 = 1;
__pyx_t_172 = __pyx_v_l;
__pyx_t_173 = __pyx_v_m;
/* "pysteps/motion/_vet.pyx":527
*
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \
* (_gradient_data[1, i, j] # TODO: Check this line! # <<<<<<<<<<<<<<
* * interp_coef[1, i, j])
*
*/
__pyx_t_174 = 1;
__pyx_t_175 = __pyx_v_i;
__pyx_t_176 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":528
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \
* (_gradient_data[1, i, j] # TODO: Check this line!
* * interp_coef[1, i, j]) # <<<<<<<<<<<<<<
*
* for l in range(1, x_sectors): #, schedule='dynamic', nogil=True):
*/
__pyx_t_177 = 1;
__pyx_t_178 = __pyx_v_i;
__pyx_t_179 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":526
* * interp_coef[1, i, j])
*
* grad_residuals[1, l, m] = grad_residuals[1, l, m] + \ # <<<<<<<<<<<<<<
* (_gradient_data[1, i, j] # TODO: Check this line!
* * interp_coef[1, i, j])
*/
__pyx_t_180 = 1;
__pyx_t_181 = __pyx_v_l;
__pyx_t_182 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_180, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_181, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_182, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) = ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_171, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_172, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_173, __pyx_pybuffernd_grad_residuals.diminfo[2].strides)) + ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_174, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_175, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_176, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_177, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_178, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_179, __pyx_pybuffernd_interp_coef.diminfo[2].strides))));
}
}
}
}
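/* Note: the second nest (_vet.pyx:519-528) repeats the accumulation for
 * sectors m >= 1, reading pixels from the previous column block
 * (j_min[m-1]..j_max[m-1]) weighted by interp_coef[1]; the nest that
 * starts below (_vet.pyx:530-537) does the same for l >= 1 with the
 * previous row block and interp_coef[2], written with += instead of the
 * expanded x = x + ... form.  The pattern mirrors the four-plane
 * interpolation used when assembling the displacement field: a sector's
 * parameters also influence neighbouring pixel blocks, so its gradient
 * must gather contributions back from them (an interpretation inferred
 * from the index pattern, not stated in the source). */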
/* "pysteps/motion/_vet.pyx":530
* * interp_coef[1, i, j])
*
* for l in range(1, x_sectors): #, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<<
* for m in range(y_sectors):
* for i in range(i_min[l - 1], i_max[l - 1]):
*/
__pyx_t_25 = __pyx_v_x_sectors;
__pyx_t_24 = __pyx_t_25;
for (__pyx_t_23 = 1; __pyx_t_23 < __pyx_t_24; __pyx_t_23+=1) {
__pyx_v_l = __pyx_t_23;
/* "pysteps/motion/_vet.pyx":531
*
* for l in range(1, x_sectors): #, schedule='dynamic', nogil=True):
* for m in range(y_sectors): # <<<<<<<<<<<<<<
* for i in range(i_min[l - 1], i_max[l - 1]):
* for j in range(j_min[m], j_max[m]):
*/
__pyx_t_27 = __pyx_v_y_sectors;
__pyx_t_28 = __pyx_t_27;
for (__pyx_t_29 = 0; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) {
__pyx_v_m = __pyx_t_29;
/* "pysteps/motion/_vet.pyx":532
* for l in range(1, x_sectors): #, schedule='dynamic', nogil=True):
* for m in range(y_sectors):
* for i in range(i_min[l - 1], i_max[l - 1]): # <<<<<<<<<<<<<<
* for j in range(j_min[m], j_max[m]):
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
*/
__pyx_t_183 = (__pyx_v_l - 1);
__pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_183, __pyx_pybuffernd_i_max.diminfo[0].strides));
__pyx_t_184 = (__pyx_v_l - 1);
__pyx_t_124 = __pyx_t_79;
for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_184, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) {
__pyx_v_i = __pyx_t_125;
/* "pysteps/motion/_vet.pyx":533
* for m in range(y_sectors):
* for i in range(i_min[l - 1], i_max[l - 1]):
* for j in range(j_min[m], j_max[m]): # <<<<<<<<<<<<<<
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
* * interp_coef[2, i, j])
*/
__pyx_t_185 = __pyx_v_m;
__pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_185, __pyx_pybuffernd_j_max.diminfo[0].strides));
__pyx_t_186 = __pyx_v_m;
__pyx_t_129 = __pyx_t_127;
for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_186, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) {
__pyx_v_j = __pyx_t_130;
/* "pysteps/motion/_vet.pyx":534
* for i in range(i_min[l - 1], i_max[l - 1]):
* for j in range(j_min[m], j_max[m]):
* grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<<
* * interp_coef[2, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j]
*/
__pyx_t_187 = 0;
__pyx_t_188 = __pyx_v_i;
__pyx_t_189 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":535
* for j in range(j_min[m], j_max[m]):
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
* * interp_coef[2, i, j]) # <<<<<<<<<<<<<<
* grad_residuals[1, l, m] += (_gradient_data[1, i, j]
* * interp_coef[2, i, j])
*/
__pyx_t_190 = 2;
__pyx_t_191 = __pyx_v_i;
__pyx_t_192 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":534
* for i in range(i_min[l - 1], i_max[l - 1]):
* for j in range(j_min[m], j_max[m]):
* grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<<
* * interp_coef[2, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j]
*/
__pyx_t_193 = 0;
__pyx_t_194 = __pyx_v_l;
__pyx_t_195 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_193, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_194, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_195, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_187, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_188, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_189, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_190, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_191, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_192, __pyx_pybuffernd_interp_coef.diminfo[2].strides)));
/* "pysteps/motion/_vet.pyx":536
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
* * interp_coef[2, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<<
* * interp_coef[2, i, j])
*
*/
__pyx_t_196 = 1;
__pyx_t_197 = __pyx_v_i;
__pyx_t_198 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":537
* * interp_coef[2, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j]
* * interp_coef[2, i, j]) # <<<<<<<<<<<<<<
*
* for m in range(1, y_sectors):
*/
__pyx_t_199 = 2;
__pyx_t_200 = __pyx_v_i;
__pyx_t_201 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":536
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
* * interp_coef[2, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<<
* * interp_coef[2, i, j])
*
*/
__pyx_t_202 = 1;
__pyx_t_203 = __pyx_v_l;
__pyx_t_204 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_202, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_203, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_204, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_196, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_197, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_198, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_199, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_200, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_201, __pyx_pybuffernd_interp_coef.diminfo[2].strides)));
}
}
}
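        /* Note: the pass below mirrors the one above but shifts both sector
         * windows (i over [i_min[l-1], i_max[l-1]) and j over
         * [j_min[m-1], j_max[m-1])) and weights by interp_coef[3, i, j],
         * which appears to be the diagonally adjacent quadrant. */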
/* "pysteps/motion/_vet.pyx":539
* * interp_coef[2, i, j])
*
* for m in range(1, y_sectors): # <<<<<<<<<<<<<<
* for i in range(i_min[l - 1], i_max[l - 1]):
* for j in range(j_min[m - 1], j_max[m - 1]):
*/
__pyx_t_27 = __pyx_v_y_sectors;
__pyx_t_28 = __pyx_t_27;
for (__pyx_t_29 = 1; __pyx_t_29 < __pyx_t_28; __pyx_t_29+=1) {
__pyx_v_m = __pyx_t_29;
/* "pysteps/motion/_vet.pyx":540
*
* for m in range(1, y_sectors):
* for i in range(i_min[l - 1], i_max[l - 1]): # <<<<<<<<<<<<<<
* for j in range(j_min[m - 1], j_max[m - 1]):
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
*/
__pyx_t_205 = (__pyx_v_l - 1);
__pyx_t_79 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_max.rcbuffer->pybuffer.buf, __pyx_t_205, __pyx_pybuffernd_i_max.diminfo[0].strides));
__pyx_t_206 = (__pyx_v_l - 1);
__pyx_t_124 = __pyx_t_79;
for (__pyx_t_125 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_i_min.rcbuffer->pybuffer.buf, __pyx_t_206, __pyx_pybuffernd_i_min.diminfo[0].strides)); __pyx_t_125 < __pyx_t_124; __pyx_t_125+=1) {
__pyx_v_i = __pyx_t_125;
/* "pysteps/motion/_vet.pyx":541
* for m in range(1, y_sectors):
* for i in range(i_min[l - 1], i_max[l - 1]):
* for j in range(j_min[m - 1], j_max[m - 1]): # <<<<<<<<<<<<<<
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
* * interp_coef[3, i, j])
*/
__pyx_t_207 = (__pyx_v_m - 1);
__pyx_t_127 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_max.rcbuffer->pybuffer.buf, __pyx_t_207, __pyx_pybuffernd_j_max.diminfo[0].strides));
__pyx_t_208 = (__pyx_v_m - 1);
__pyx_t_129 = __pyx_t_127;
for (__pyx_t_130 = (*__Pyx_BufPtrStrided1d(__pyx_t_7pysteps_6motion_4_vet_intp *, __pyx_pybuffernd_j_min.rcbuffer->pybuffer.buf, __pyx_t_208, __pyx_pybuffernd_j_min.diminfo[0].strides)); __pyx_t_130 < __pyx_t_129; __pyx_t_130+=1) {
__pyx_v_j = __pyx_t_130;
/* "pysteps/motion/_vet.pyx":542
* for i in range(i_min[l - 1], i_max[l - 1]):
* for j in range(j_min[m - 1], j_max[m - 1]):
* grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<<
* * interp_coef[3, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j]
*/
__pyx_t_209 = 0;
__pyx_t_210 = __pyx_v_i;
__pyx_t_211 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":543
* for j in range(j_min[m - 1], j_max[m - 1]):
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
* * interp_coef[3, i, j]) # <<<<<<<<<<<<<<
* grad_residuals[1, l, m] += (_gradient_data[1, i, j]
* * interp_coef[3, i, j])
*/
__pyx_t_212 = 3;
__pyx_t_213 = __pyx_v_i;
__pyx_t_214 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":542
* for i in range(i_min[l - 1], i_max[l - 1]):
* for j in range(j_min[m - 1], j_max[m - 1]):
* grad_residuals[0, l, m] += (_gradient_data[0, i, j] # <<<<<<<<<<<<<<
* * interp_coef[3, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j]
*/
__pyx_t_215 = 0;
__pyx_t_216 = __pyx_v_l;
__pyx_t_217 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_215, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_216, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_217, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_209, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_210, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_211, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_212, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_213, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_214, __pyx_pybuffernd_interp_coef.diminfo[2].strides)));
/* "pysteps/motion/_vet.pyx":544
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
* * interp_coef[3, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<<
* * interp_coef[3, i, j])
*
*/
__pyx_t_218 = 1;
__pyx_t_219 = __pyx_v_i;
__pyx_t_220 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":545
* * interp_coef[3, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j]
* * interp_coef[3, i, j]) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_221 = 3;
__pyx_t_222 = __pyx_v_i;
__pyx_t_223 = __pyx_v_j;
/* "pysteps/motion/_vet.pyx":544
* grad_residuals[0, l, m] += (_gradient_data[0, i, j]
* * interp_coef[3, i, j])
* grad_residuals[1, l, m] += (_gradient_data[1, i, j] # <<<<<<<<<<<<<<
* * interp_coef[3, i, j])
*
*/
__pyx_t_224 = 1;
__pyx_t_225 = __pyx_v_l;
__pyx_t_226 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer.buf, __pyx_t_224, __pyx_pybuffernd_grad_residuals.diminfo[0].strides, __pyx_t_225, __pyx_pybuffernd_grad_residuals.diminfo[1].strides, __pyx_t_226, __pyx_pybuffernd_grad_residuals.diminfo[2].strides) += ((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd__gradient_data.rcbuffer->pybuffer.buf, __pyx_t_218, __pyx_pybuffernd__gradient_data.diminfo[0].strides, __pyx_t_219, __pyx_pybuffernd__gradient_data.diminfo[1].strides, __pyx_t_220, __pyx_pybuffernd__gradient_data.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_interp_coef.rcbuffer->pybuffer.buf, __pyx_t_221, __pyx_pybuffernd_interp_coef.diminfo[0].strides, __pyx_t_222, __pyx_pybuffernd_interp_coef.diminfo[1].strides, __pyx_t_223, __pyx_pybuffernd_interp_coef.diminfo[2].strides)));
}
}
}
}
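    /* Note: end of the gradient branch of the cost function; the
     * "goto __pyx_L26" a few lines below jumps over the else branch, which
     * re-runs _warp without gradient output and reduces to a scalar
     * residual sum. */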
/* "pysteps/motion/_vet.pyx":492
*
* # Compute residual part of the cost function
* if gradient: # <<<<<<<<<<<<<<
*
* morphed_image, morph_mask, _gradient_data = _warp(template_image,
*/
goto __pyx_L26;
}
/* "pysteps/motion/_vet.pyx":550
* else:
*
* morphed_image, morph_mask = _warp(template_image, # <<<<<<<<<<<<<<
* mask,
* displacement,
*/
/*else*/ {
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_warp); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "pysteps/motion/_vet.pyx":552
* morphed_image, morph_mask = _warp(template_image,
* mask,
* displacement, # <<<<<<<<<<<<<<
* gradient=False)
* morph_mask[mask > 0] = 1
*/
__pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_template_image));
__Pyx_GIVEREF(((PyObject *)__pyx_v_template_image));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_template_image));
__Pyx_INCREF(((PyObject *)__pyx_v_mask));
__Pyx_GIVEREF(((PyObject *)__pyx_v_mask));
PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_mask));
__Pyx_INCREF(((PyObject *)__pyx_v_displacement));
__Pyx_GIVEREF(((PyObject *)__pyx_v_displacement));
PyTuple_SET_ITEM(__pyx_t_2, 2, ((PyObject *)__pyx_v_displacement));
/* "pysteps/motion/_vet.pyx":553
* mask,
* displacement,
* gradient=False) # <<<<<<<<<<<<<<
* morph_mask[mask > 0] = 1
* residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2)
*/
__pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 553, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_gradient, Py_False) < 0) __PYX_ERR(0, 553, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":550
* else:
*
* morphed_image, morph_mask = _warp(template_image, # <<<<<<<<<<<<<<
* mask,
* displacement,
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) {
PyObject* sequence = __pyx_t_5;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(0, 550, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
if (likely(PyTuple_CheckExact(sequence))) {
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 1);
} else {
__pyx_t_4 = PyList_GET_ITEM(sequence, 0);
__pyx_t_2 = PyList_GET_ITEM(sequence, 1);
}
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_2);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
#endif
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
} else {
Py_ssize_t index = -1;
__pyx_t_3 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 550, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_110 = Py_TYPE(__pyx_t_3)->tp_iternext;
index = 0; __pyx_t_4 = __pyx_t_110(__pyx_t_3); if (unlikely(!__pyx_t_4)) goto __pyx_L57_unpacking_failed;
__Pyx_GOTREF(__pyx_t_4);
index = 1; __pyx_t_2 = __pyx_t_110(__pyx_t_3); if (unlikely(!__pyx_t_2)) goto __pyx_L57_unpacking_failed;
__Pyx_GOTREF(__pyx_t_2);
if (__Pyx_IternextUnpackEndCheck(__pyx_t_110(__pyx_t_3), 2) < 0) __PYX_ERR(0, 550, __pyx_L1_error)
__pyx_t_110 = NULL;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L58_unpacking_done;
__pyx_L57_unpacking_failed:;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_110 = NULL;
if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
__PYX_ERR(0, 550, __pyx_L1_error)
__pyx_L58_unpacking_done:;
}
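    /* Note: the code above implements the Python tuple unpacking
     * "morphed_image, morph_mask = _warp(...)": the fast path indexes an
     * exact tuple or list directly, while the fallback drives tp_iternext
     * and raises the usual "need more values" / "too many values" errors on
     * a length mismatch. */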
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 550, __pyx_L1_error)
if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 550, __pyx_L1_error)
__pyx_t_118 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_t_118, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer, (PyObject*)__pyx_v_morphed_image, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10);
}
__pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_morphed_image.diminfo[0].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morphed_image.diminfo[0].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morphed_image.diminfo[1].strides = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morphed_image.diminfo[1].shape = __pyx_pybuffernd_morphed_image.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 550, __pyx_L1_error)
}
__pyx_t_118 = 0;
__pyx_v_morphed_image = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_119 = ((PyArrayObject *)__pyx_t_2);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_t_119, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer, (PyObject*)__pyx_v_morph_mask, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_int8, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_10); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_12);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__pyx_t_10 = __pyx_t_11 = __pyx_t_12 = 0;
}
__pyx_pybuffernd_morph_mask.diminfo[0].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_morph_mask.diminfo[0].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_morph_mask.diminfo[1].strides = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_morph_mask.diminfo[1].shape = __pyx_pybuffernd_morph_mask.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 550, __pyx_L1_error)
}
__pyx_t_119 = 0;
__pyx_v_morph_mask = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
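    /* Note: each unpacked object is type-checked against numpy.ndarray and
     * its Py_buffer view re-acquired via __Pyx_GetBufferAndValidate; if
     * re-acquisition fails, the previous buffer is restored inside a
     * PyErr_Fetch/PyErr_Restore bracket before a buffer-fallback error is
     * raised, so the typed views never dangle. */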
/* "pysteps/motion/_vet.pyx":554
* displacement,
* gradient=False)
* morph_mask[mask > 0] = 1 # <<<<<<<<<<<<<<
* residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2)
*
*/
__pyx_t_5 = PyObject_RichCompare(((PyObject *)__pyx_v_mask), __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 554, __pyx_L1_error)
if (unlikely(PyObject_SetItem(((PyObject *)__pyx_v_morph_mask), __pyx_t_5, __pyx_int_1) < 0)) __PYX_ERR(0, 554, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "pysteps/motion/_vet.pyx":555
* gradient=False)
* morph_mask[mask > 0] = 1
* residuals = np.sum((morphed_image - input_image)[morph_mask == 0] ** 2) # <<<<<<<<<<<<<<
*
* # Compute smoothness constraint part of the cost function
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 555, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sum); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 555, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyNumber_Subtract(((PyObject *)__pyx_v_morphed_image), ((PyObject *)__pyx_v_input_image)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 555, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyObject_RichCompare(((PyObject *)__pyx_v_morph_mask), __pyx_int_0, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 555, __pyx_L1_error)
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 555, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Power(__pyx_t_6, __pyx_int_2, Py_None); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 555, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_5 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 555, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_227 = __pyx_PyFloat_AsDouble(__pyx_t_5); if (unlikely((__pyx_t_227 == ((npy_float64)-1)) && PyErr_Occurred())) __PYX_ERR(0, 555, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_residuals = __pyx_t_227;
}
__pyx_L26:;
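  /* Note: in the no-gradient branch above, the scalar data term is
   *     residuals = sum over pixels with morph_mask == 0 of
   *                 (morphed_image - input_image)**2
   * evaluated through NumPy (np.sum) rather than C loops, per _vet.pyx:555. */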
/* "pysteps/motion/_vet.pyx":558
*
* # Compute smoothness constraint part of the cost function
* cdef float64 smoothness_penalty = 0 # <<<<<<<<<<<<<<
*
* cdef float64 df_dx2 = 0
*/
__pyx_v_smoothness_penalty = 0.0;
/* "pysteps/motion/_vet.pyx":560
* cdef float64 smoothness_penalty = 0
*
* cdef float64 df_dx2 = 0 # <<<<<<<<<<<<<<
* cdef float64 df_dxdy = 0
* cdef float64 df_dy2 = 0
*/
__pyx_v_df_dx2 = 0.0;
/* "pysteps/motion/_vet.pyx":561
*
* cdef float64 df_dx2 = 0
* cdef float64 df_dxdy = 0 # <<<<<<<<<<<<<<
* cdef float64 df_dy2 = 0
*
*/
__pyx_v_df_dxdy = 0.0;
/* "pysteps/motion/_vet.pyx":562
* cdef float64 df_dx2 = 0
* cdef float64 df_dxdy = 0
* cdef float64 df_dy2 = 0 # <<<<<<<<<<<<<<
*
* cdef float64 inloop_smoothness_penalty
*/
__pyx_v_df_dy2 = 0.0;
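  /* Note: the loops below evaluate centred second-order finite differences
   * of the sector displacement field u = sector_displacement, per the quoted
   * _vet.pyx source (a = axis):
   *     df_dx2  = (u[a, l+1, m] - 2 u[a, l, m] + u[a, l-1, m]) / dx^2
   *     df_dy2  = (u[a, l, m+1] - 2 u[a, l, m] + u[a, l, m-1]) / dy^2
   *     df_dxdy = (u[a, l+1, m+1] - u[a, l+1, m-1]
   *                - u[a, l-1, m+1] + u[a, l-1, m-1]) / (4 dx dy)
   * with dx = x_sector_size and dy = y_sector_size, and accumulate the
   * bending penalty smooth_gain * sum(df_dx2^2 + 2 df_dxdy^2 + df_dy2^2). */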
/* "pysteps/motion/_vet.pyx":566
* cdef float64 inloop_smoothness_penalty
*
* if smooth_gain > 0.: # <<<<<<<<<<<<<<
*
* for axis in range(2): #, schedule='dynamic', nogil=True):
*/
__pyx_t_1 = ((__pyx_v_smooth_gain > 0.) != 0);
if (__pyx_t_1) {
/* "pysteps/motion/_vet.pyx":568
* if smooth_gain > 0.:
*
* for axis in range(2): #, schedule='dynamic', nogil=True): # <<<<<<<<<<<<<<
*
* inloop_smoothness_penalty = 0
*/
for (__pyx_t_25 = 0; __pyx_t_25 < 2; __pyx_t_25+=1) {
__pyx_v_axis = __pyx_t_25;
/* "pysteps/motion/_vet.pyx":570
* for axis in range(2): #, schedule='dynamic', nogil=True):
*
* inloop_smoothness_penalty = 0 # <<<<<<<<<<<<<<
*
* for l in range(1, x_sectors - 1):
*/
__pyx_v_inloop_smoothness_penalty = 0.0;
/* "pysteps/motion/_vet.pyx":572
* inloop_smoothness_penalty = 0
*
* for l in range(1, x_sectors - 1): # <<<<<<<<<<<<<<
*
* for m in range(1, y_sectors - 1):
*/
__pyx_t_228 = (__pyx_v_x_sectors - 1);
__pyx_t_229 = __pyx_t_228;
for (__pyx_t_24 = 1; __pyx_t_24 < __pyx_t_229; __pyx_t_24+=1) {
__pyx_v_l = __pyx_t_24;
/* "pysteps/motion/_vet.pyx":574
* for l in range(1, x_sectors - 1):
*
* for m in range(1, y_sectors - 1): # <<<<<<<<<<<<<<
* df_dx2 = (sector_displacement[axis, l + 1, m]
* - 2 * sector_displacement[axis, l, m]
*/
__pyx_t_230 = (__pyx_v_y_sectors - 1);
__pyx_t_231 = __pyx_t_230;
for (__pyx_t_23 = 1; __pyx_t_23 < __pyx_t_231; __pyx_t_23+=1) {
__pyx_v_m = __pyx_t_23;
/* "pysteps/motion/_vet.pyx":575
*
* for m in range(1, y_sectors - 1):
* df_dx2 = (sector_displacement[axis, l + 1, m] # <<<<<<<<<<<<<<
* - 2 * sector_displacement[axis, l, m]
* + sector_displacement[axis, l - 1, m])
*/
__pyx_t_232 = __pyx_v_axis;
__pyx_t_233 = (__pyx_v_l + 1);
__pyx_t_234 = __pyx_v_m;
/* "pysteps/motion/_vet.pyx":576
* for m in range(1, y_sectors - 1):
* df_dx2 = (sector_displacement[axis, l + 1, m]
* - 2 * sector_displacement[axis, l, m] # <<<<<<<<<<<<<<
* + sector_displacement[axis, l - 1, m])
*
*/
__pyx_t_235 = __pyx_v_axis;
__pyx_t_236 = __pyx_v_l;
__pyx_t_237 = __pyx_v_m;
/* "pysteps/motion/_vet.pyx":577
* df_dx2 = (sector_displacement[axis, l + 1, m]
* - 2 * sector_displacement[axis, l, m]
* + sector_displacement[axis, l - 1, m]) # <<<<<<<<<<<<<<
*
* df_dx2 = df_dx2 / (x_sector_size * x_sector_size)
*/
__pyx_t_238 = __pyx_v_axis;
__pyx_t_239 = (__pyx_v_l - 1);
__pyx_t_240 = __pyx_v_m;
__pyx_v_df_dx2 = (((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_232, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_233, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_234, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (2.0 * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_235, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_236, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_237, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_238, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_239, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_240, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)));
/* "pysteps/motion/_vet.pyx":579
* + sector_displacement[axis, l - 1, m])
*
* df_dx2 = df_dx2 / (x_sector_size * x_sector_size) # <<<<<<<<<<<<<<
*
* df_dy2 = (sector_displacement[axis, l, m + 1]
*/
__pyx_v_df_dx2 = (__pyx_v_df_dx2 / ((__pyx_t_7pysteps_6motion_4_vet_float64)(__pyx_v_x_sector_size * __pyx_v_x_sector_size)));
/* "pysteps/motion/_vet.pyx":581
* df_dx2 = df_dx2 / (x_sector_size * x_sector_size)
*
* df_dy2 = (sector_displacement[axis, l, m + 1] # <<<<<<<<<<<<<<
* - 2 * sector_displacement[axis, l, m]
* + sector_displacement[axis, l, m - 1])
*/
__pyx_t_241 = __pyx_v_axis;
__pyx_t_242 = __pyx_v_l;
__pyx_t_243 = (__pyx_v_m + 1);
/* "pysteps/motion/_vet.pyx":582
*
* df_dy2 = (sector_displacement[axis, l, m + 1]
* - 2 * sector_displacement[axis, l, m] # <<<<<<<<<<<<<<
* + sector_displacement[axis, l, m - 1])
*
*/
__pyx_t_244 = __pyx_v_axis;
__pyx_t_245 = __pyx_v_l;
__pyx_t_246 = __pyx_v_m;
/* "pysteps/motion/_vet.pyx":583
* df_dy2 = (sector_displacement[axis, l, m + 1]
* - 2 * sector_displacement[axis, l, m]
* + sector_displacement[axis, l, m - 1]) # <<<<<<<<<<<<<<
*
* df_dy2 = df_dy2 / (y_sector_size * y_sector_size)
*/
__pyx_t_247 = __pyx_v_axis;
__pyx_t_248 = __pyx_v_l;
__pyx_t_249 = (__pyx_v_m - 1);
__pyx_v_df_dy2 = (((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_241, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_242, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_243, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (2.0 * (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_244, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_245, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_246, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_247, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_248, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_249, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)));
/* "pysteps/motion/_vet.pyx":585
* + sector_displacement[axis, l, m - 1])
*
* df_dy2 = df_dy2 / (y_sector_size * y_sector_size) # <<<<<<<<<<<<<<
*
* df_dxdy = (sector_displacement[axis, l + 1, m + 1]
*/
__pyx_v_df_dy2 = (__pyx_v_df_dy2 / ((__pyx_t_7pysteps_6motion_4_vet_float64)(__pyx_v_y_sector_size * __pyx_v_y_sector_size)));
/* "pysteps/motion/_vet.pyx":587
* df_dy2 = df_dy2 / (y_sector_size * y_sector_size)
*
* df_dxdy = (sector_displacement[axis, l + 1, m + 1] # <<<<<<<<<<<<<<
* - sector_displacement[axis, l + 1, m - 1]
* - sector_displacement[axis, l - 1, m + 1]
*/
__pyx_t_250 = __pyx_v_axis;
__pyx_t_251 = (__pyx_v_l + 1);
__pyx_t_252 = (__pyx_v_m + 1);
/* "pysteps/motion/_vet.pyx":588
*
* df_dxdy = (sector_displacement[axis, l + 1, m + 1]
* - sector_displacement[axis, l + 1, m - 1] # <<<<<<<<<<<<<<
* - sector_displacement[axis, l - 1, m + 1]
* + sector_displacement[axis, l - 1, m - 1])
*/
__pyx_t_253 = __pyx_v_axis;
__pyx_t_254 = (__pyx_v_l + 1);
__pyx_t_255 = (__pyx_v_m - 1);
/* "pysteps/motion/_vet.pyx":589
* df_dxdy = (sector_displacement[axis, l + 1, m + 1]
* - sector_displacement[axis, l + 1, m - 1]
* - sector_displacement[axis, l - 1, m + 1] # <<<<<<<<<<<<<<
* + sector_displacement[axis, l - 1, m - 1])
* df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size)
*/
__pyx_t_256 = __pyx_v_axis;
__pyx_t_257 = (__pyx_v_l - 1);
__pyx_t_258 = (__pyx_v_m + 1);
/* "pysteps/motion/_vet.pyx":590
* - sector_displacement[axis, l + 1, m - 1]
* - sector_displacement[axis, l - 1, m + 1]
* + sector_displacement[axis, l - 1, m - 1]) # <<<<<<<<<<<<<<
* df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size)
*
*/
__pyx_t_259 = __pyx_v_axis;
__pyx_t_260 = (__pyx_v_l - 1);
__pyx_t_261 = (__pyx_v_m - 1);
__pyx_v_df_dxdy = ((((*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_250, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_251, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_252, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_253, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_254, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_255, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))) - (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_256, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_257, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_258, __pyx_pybuffernd_sector_displacement.diminfo[2].strides))) + (*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer.buf, __pyx_t_259, __pyx_pybuffernd_sector_displacement.diminfo[0].strides, __pyx_t_260, __pyx_pybuffernd_sector_displacement.diminfo[1].strides, __pyx_t_261, __pyx_pybuffernd_sector_displacement.diminfo[2].strides)));
/* "pysteps/motion/_vet.pyx":591
* - sector_displacement[axis, l - 1, m + 1]
* + sector_displacement[axis, l - 1, m - 1])
* df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size) # <<<<<<<<<<<<<<
*
* if gradient:
*/
__pyx_v_df_dxdy = (__pyx_v_df_dxdy / ((__pyx_t_7pysteps_6motion_4_vet_float64)((4 * __pyx_v_x_sector_size) * __pyx_v_y_sector_size)));
/* "pysteps/motion/_vet.pyx":593
* df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size)
*
* if gradient: # <<<<<<<<<<<<<<
* grad_smooth[axis, l, m] -= 2 * df_dx2
* grad_smooth[axis, l + 1, m] += df_dx2
*/
__pyx_t_1 = (__pyx_v_gradient != 0);
if (__pyx_t_1) {
/* "pysteps/motion/_vet.pyx":594
*
* if gradient:
* grad_smooth[axis, l, m] -= 2 * df_dx2 # <<<<<<<<<<<<<<
* grad_smooth[axis, l + 1, m] += df_dx2
* grad_smooth[axis, l - 1, m] += df_dx2
*/
__pyx_t_262 = __pyx_v_axis;
__pyx_t_263 = __pyx_v_l;
__pyx_t_264 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_262, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_263, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_264, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= (2.0 * __pyx_v_df_dx2);
/* "pysteps/motion/_vet.pyx":595
* if gradient:
* grad_smooth[axis, l, m] -= 2 * df_dx2
* grad_smooth[axis, l + 1, m] += df_dx2 # <<<<<<<<<<<<<<
* grad_smooth[axis, l - 1, m] += df_dx2
*
*/
__pyx_t_265 = __pyx_v_axis;
__pyx_t_266 = (__pyx_v_l + 1);
__pyx_t_267 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_265, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_266, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_267, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dx2;
/* "pysteps/motion/_vet.pyx":596
* grad_smooth[axis, l, m] -= 2 * df_dx2
* grad_smooth[axis, l + 1, m] += df_dx2
* grad_smooth[axis, l - 1, m] += df_dx2 # <<<<<<<<<<<<<<
*
* grad_smooth[axis, l, m] -= 2 * df_dy2
*/
__pyx_t_268 = __pyx_v_axis;
__pyx_t_269 = (__pyx_v_l - 1);
__pyx_t_270 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_268, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_269, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_270, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dx2;
/* "pysteps/motion/_vet.pyx":598
* grad_smooth[axis, l - 1, m] += df_dx2
*
* grad_smooth[axis, l, m] -= 2 * df_dy2 # <<<<<<<<<<<<<<
* grad_smooth[axis, l, m - 1] += df_dy2
* grad_smooth[axis, l, m + 1] += df_dy2
*/
__pyx_t_271 = __pyx_v_axis;
__pyx_t_272 = __pyx_v_l;
__pyx_t_273 = __pyx_v_m;
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_271, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_272, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_273, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= (2.0 * __pyx_v_df_dy2);
/* "pysteps/motion/_vet.pyx":599
*
* grad_smooth[axis, l, m] -= 2 * df_dy2
* grad_smooth[axis, l, m - 1] += df_dy2 # <<<<<<<<<<<<<<
* grad_smooth[axis, l, m + 1] += df_dy2
*
*/
__pyx_t_274 = __pyx_v_axis;
__pyx_t_275 = __pyx_v_l;
__pyx_t_276 = (__pyx_v_m - 1);
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_274, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_275, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_276, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dy2;
/* "pysteps/motion/_vet.pyx":600
* grad_smooth[axis, l, m] -= 2 * df_dy2
* grad_smooth[axis, l, m - 1] += df_dy2
* grad_smooth[axis, l, m + 1] += df_dy2 # <<<<<<<<<<<<<<
*
* grad_smooth[axis, l - 1, m - 1] += df_dxdy
*/
__pyx_t_277 = __pyx_v_axis;
__pyx_t_278 = __pyx_v_l;
__pyx_t_279 = (__pyx_v_m + 1);
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_277, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_278, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_279, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dy2;
/* "pysteps/motion/_vet.pyx":602
* grad_smooth[axis, l, m + 1] += df_dy2
*
* grad_smooth[axis, l - 1, m - 1] += df_dxdy # <<<<<<<<<<<<<<
* grad_smooth[axis, l - 1, m + 1] -= df_dxdy
* grad_smooth[axis, l + 1, m - 1] -= df_dxdy
*/
__pyx_t_280 = __pyx_v_axis;
__pyx_t_281 = (__pyx_v_l - 1);
__pyx_t_282 = (__pyx_v_m - 1);
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_280, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_281, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_282, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dxdy;
/* "pysteps/motion/_vet.pyx":603
*
* grad_smooth[axis, l - 1, m - 1] += df_dxdy
* grad_smooth[axis, l - 1, m + 1] -= df_dxdy # <<<<<<<<<<<<<<
* grad_smooth[axis, l + 1, m - 1] -= df_dxdy
* grad_smooth[axis, l + 1, m + 1] += df_dxdy
*/
__pyx_t_283 = __pyx_v_axis;
__pyx_t_284 = (__pyx_v_l - 1);
__pyx_t_285 = (__pyx_v_m + 1);
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_283, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_284, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_285, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= __pyx_v_df_dxdy;
/* "pysteps/motion/_vet.pyx":604
* grad_smooth[axis, l - 1, m - 1] += df_dxdy
* grad_smooth[axis, l - 1, m + 1] -= df_dxdy
* grad_smooth[axis, l + 1, m - 1] -= df_dxdy # <<<<<<<<<<<<<<
* grad_smooth[axis, l + 1, m + 1] += df_dxdy
*
*/
__pyx_t_286 = __pyx_v_axis;
__pyx_t_287 = (__pyx_v_l + 1);
__pyx_t_288 = (__pyx_v_m - 1);
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_286, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_287, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_288, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) -= __pyx_v_df_dxdy;
/* "pysteps/motion/_vet.pyx":605
* grad_smooth[axis, l - 1, m + 1] -= df_dxdy
* grad_smooth[axis, l + 1, m - 1] -= df_dxdy
* grad_smooth[axis, l + 1, m + 1] += df_dxdy # <<<<<<<<<<<<<<
*
* inloop_smoothness_penalty = (df_dx2 * df_dx2
*/
__pyx_t_289 = __pyx_v_axis;
__pyx_t_290 = (__pyx_v_l + 1);
__pyx_t_291 = (__pyx_v_m + 1);
*__Pyx_BufPtrStrided3d(__pyx_t_7pysteps_6motion_4_vet_float64 *, __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.buf, __pyx_t_289, __pyx_pybuffernd_grad_smooth.diminfo[0].strides, __pyx_t_290, __pyx_pybuffernd_grad_smooth.diminfo[1].strides, __pyx_t_291, __pyx_pybuffernd_grad_smooth.diminfo[2].strides) += __pyx_v_df_dxdy;
/* "pysteps/motion/_vet.pyx":593
* df_dxdy = df_dxdy / (4 * x_sector_size * y_sector_size)
*
* if gradient: # <<<<<<<<<<<<<<
* grad_smooth[axis, l, m] -= 2 * df_dx2
* grad_smooth[axis, l + 1, m] += df_dx2
*/
}
/* "pysteps/motion/_vet.pyx":609
* inloop_smoothness_penalty = (df_dx2 * df_dx2
* + 2 * df_dxdy * df_dxdy
* + df_dy2 * df_dy2) # <<<<<<<<<<<<<<
*
* smoothness_penalty += inloop_smoothness_penalty
*/
__pyx_v_inloop_smoothness_penalty = (((__pyx_v_df_dx2 * __pyx_v_df_dx2) + ((2.0 * __pyx_v_df_dxdy) * __pyx_v_df_dxdy)) + (__pyx_v_df_dy2 * __pyx_v_df_dy2));
/* "pysteps/motion/_vet.pyx":611
* + df_dy2 * df_dy2)
*
* smoothness_penalty += inloop_smoothness_penalty # <<<<<<<<<<<<<<
*
* smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size
*/
__pyx_v_smoothness_penalty = (__pyx_v_smoothness_penalty + __pyx_v_inloop_smoothness_penalty);
}
}
}
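    /* Note: inside the "if gradient" block above, each displacement node
     * receives back the derivative value with its stencil sign (e.g.
     * -2*df_dx2 at (l, m), +df_dx2 at (l+1, m) and (l-1, m), and +/-df_dxdy
     * at the four corners), scattering the squared-derivative terms onto
     * grad_smooth; the common factor 2 * smooth_gain is applied once to the
     * whole array at _vet.pyx:616 further down. */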
/* "pysteps/motion/_vet.pyx":613
* smoothness_penalty += inloop_smoothness_penalty
*
* smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size # <<<<<<<<<<<<<<
*
* if gradient:
*/
__pyx_v_smoothness_penalty = (__pyx_v_smoothness_penalty * __pyx_v_smooth_gain);
/* "pysteps/motion/_vet.pyx":566
* cdef float64 inloop_smoothness_penalty
*
* if smooth_gain > 0.: # <<<<<<<<<<<<<<
*
* for axis in range(2): #, schedule='dynamic', nogil=True):
*/
}
/* "pysteps/motion/_vet.pyx":615
* smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size
*
* if gradient: # <<<<<<<<<<<<<<
* grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size
*
*/
__pyx_t_1 = (__pyx_v_gradient != 0);
if (__pyx_t_1) {
/* "pysteps/motion/_vet.pyx":616
*
* if gradient:
* grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size # <<<<<<<<<<<<<<
*
* return grad_residuals + grad_smooth
*/
__pyx_t_5 = PyFloat_FromDouble((2.0 * __pyx_v_smooth_gain)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = PyNumber_InPlaceMultiply(((PyObject *)__pyx_v_grad_smooth), __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 616, __pyx_L1_error)
__pyx_t_116 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer);
__pyx_t_9 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_t_116, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack);
if (unlikely(__pyx_t_9 < 0)) {
PyErr_Fetch(&__pyx_t_12, &__pyx_t_11, &__pyx_t_10);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer, (PyObject*)__pyx_v_grad_smooth, &__Pyx_TypeInfo_nn___pyx_t_7pysteps_6motion_4_vet_float64, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 3, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_12); Py_XDECREF(__pyx_t_11); Py_XDECREF(__pyx_t_10);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_12, __pyx_t_11, __pyx_t_10);
}
__pyx_t_12 = __pyx_t_11 = __pyx_t_10 = 0;
}
__pyx_pybuffernd_grad_smooth.diminfo[0].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_grad_smooth.diminfo[0].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_grad_smooth.diminfo[1].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_grad_smooth.diminfo[1].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_grad_smooth.diminfo[2].strides = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_grad_smooth.diminfo[2].shape = __pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer.shape[2];
if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 616, __pyx_L1_error)
}
__pyx_t_116 = 0;
__Pyx_DECREF_SET(__pyx_v_grad_smooth, ((PyArrayObject *)__pyx_t_4));
__pyx_t_4 = 0;
/* "pysteps/motion/_vet.pyx":618
* grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size
*
* return grad_residuals + grad_smooth # <<<<<<<<<<<<<<
* else:
* return residuals, smoothness_penalty
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_4 = PyNumber_Add(((PyObject *)__pyx_v_grad_residuals), ((PyObject *)__pyx_v_grad_smooth)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 618, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
/* "pysteps/motion/_vet.pyx":615
* smoothness_penalty *= smooth_gain #* x_sector_size * y_sector_size
*
* if gradient: # <<<<<<<<<<<<<<
* grad_smooth *= 2 * smooth_gain #* x_sector_size * y_sector_size
*
*/
}
/* "pysteps/motion/_vet.pyx":620
* return grad_residuals + grad_smooth
* else:
* return residuals, smoothness_penalty # <<<<<<<<<<<<<<
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_4 = PyFloat_FromDouble(__pyx_v_residuals); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 620, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyFloat_FromDouble(__pyx_v_smoothness_penalty); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 620, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 620, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "pysteps/motion/_vet.pyx":240
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<<
* np.ndarray[float64, ndim=2] template_image,
* np.ndarray[float64, ndim=2] input_image,
*/
/* function exit code */
__pyx_L1_error:;
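  /* Note: error epilogue. Temporaries are cleared first; the exported
   * Py_buffer views are then released inside a PyErr_Fetch/PyErr_Restore
   * bracket so buffer teardown cannot clobber the exception in flight, after
   * which a traceback frame for _cost_function is appended and NULL is
   * returned. */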
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_109);
__Pyx_XDECREF(__pyx_t_121);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("pysteps.motion._vet._cost_function", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd__gradient_data.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_buffer.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_displacement.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_residuals.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_grad_smooth.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_max.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_i_min.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_input_image.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_interp_coef.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_max.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_j_min.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_l_i.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_m_j.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_mask.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morph_mask.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_morphed_image.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sector_displacement.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_template_image.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x_guess.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y_guess.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_displacement);
__Pyx_XDECREF((PyObject *)__pyx_v_x);
__Pyx_XDECREF((PyObject *)__pyx_v_y);
__Pyx_XDECREF((PyObject *)__pyx_v_x_guess);
__Pyx_XDECREF((PyObject *)__pyx_v_y_guess);
__Pyx_XDECREF((PyObject *)__pyx_v_interp_coef);
__Pyx_XDECREF((PyObject *)__pyx_v_l_i);
__Pyx_XDECREF((PyObject *)__pyx_v_m_j);
__Pyx_XDECREF((PyObject *)__pyx_v_i_min);
__Pyx_XDECREF((PyObject *)__pyx_v_i_max);
__Pyx_XDECREF((PyObject *)__pyx_v_j_min);
__Pyx_XDECREF((PyObject *)__pyx_v_j_max);
__Pyx_XDECREF(__pyx_v_counts);
__Pyx_XDECREF((PyObject *)__pyx_v_morphed_image);
__Pyx_XDECREF((PyObject *)__pyx_v_morph_mask);
__Pyx_XDECREF((PyObject *)__pyx_v__gradient_data);
__Pyx_XDECREF((PyObject *)__pyx_v_grad_residuals);
__Pyx_XDECREF((PyObject *)__pyx_v_grad_smooth);
__Pyx_XDECREF((PyObject *)__pyx_v_buffer);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
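/* A minimal standalone sketch (commentary, not Cython output) of the
 * smoothness term computed by the buffer-access loops above, assuming a
 * dense field for one displacement axis and the finite-difference formulas
 * quoted from _vet.pyx. The names (VET_SMOOTHNESS_SKETCH,
 * smoothness_penalty_sketch, SK_NX, SK_NY) are illustrative only; the guard
 * keeps the sketch inert unless compiled on its own with the macro defined. */
#ifdef VET_SMOOTHNESS_SKETCH
#include <stdio.h>

#define SK_NX 5
#define SK_NY 5

static double smoothness_penalty_sketch(double u[SK_NX][SK_NY],
                                        double dx, double dy, double gain)
{
    double penalty = 0.0;
    for (int l = 1; l < SK_NX - 1; ++l) {
        for (int m = 1; m < SK_NY - 1; ++m) {
            /* centred second differences, as in _vet.pyx:575-591 */
            double df_dx2 = (u[l + 1][m] - 2.0 * u[l][m] + u[l - 1][m])
                            / (dx * dx);
            double df_dy2 = (u[l][m + 1] - 2.0 * u[l][m] + u[l][m - 1])
                            / (dy * dy);
            double df_dxdy = (u[l + 1][m + 1] - u[l + 1][m - 1]
                              - u[l - 1][m + 1] + u[l - 1][m - 1])
                             / (4.0 * dx * dy);
            penalty += df_dx2 * df_dx2
                     + 2.0 * df_dxdy * df_dxdy
                     + df_dy2 * df_dy2;
        }
    }
    return gain * penalty;  /* scaled once, as at _vet.pyx:613 */
}

int main(void)
{
    double u[SK_NX][SK_NY] = {{0.0}};
    u[2][2] = 1.0;  /* a single bump yields nonzero curvature around it */
    printf("penalty = %f\n", smoothness_penalty_sketch(u, 1.0, 1.0, 1.0));
    return 0;
}
#endif /* VET_SMOOTHNESS_SKETCH */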
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":258
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fulfill the PEP.
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
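/* Note: the implementation below exports a PEP 3118 Py_buffer view for a
 * NumPy array via the numpy.pxd shim bundled with this Cython version: it
 * rejects non-contiguous arrays when a contiguity flag demands them, fills
 * buf/ndim/strides/shape (copying into a freshly PyObject_Malloc'd block
 * when npy_intp and Py_ssize_t differ in size, otherwise aliasing the
 * array's own strides/dims), and records itemsize and read-only status from
 * the array flags. */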
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
PyArray_Descr *__pyx_t_7;
PyObject *__pyx_t_8 = NULL;
char *__pyx_t_9;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265
*
* cdef int i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":266
* cdef int i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
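  /* Note: a standard endianness probe -- store the int 1 and inspect its
   * first byte; on a little-endian host the least-significant byte comes
   * first, so that byte is nonzero exactly when the host is little-endian. */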
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":268
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":271
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
if (unlikely(__pyx_t_1)) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 272, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 272, __pyx_L1_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L7_bool_binop_done;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L7_bool_binop_done:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
if (unlikely(__pyx_t_1)) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 276, __pyx_L1_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim))));
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
goto __pyx_L9;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
/*else*/ {
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L9:;
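/* When sizeof(npy_intp) != sizeof(Py_ssize_t) the strides and shape
 * were copied above into a single PyObject_Malloc block laid out as
 * [ strides[0..ndim-1] | shape[0..ndim-1] ], which is why info.shape is
 * simply info.strides + ndim and why __releasebuffer__ below frees only
 * info.strides. In the common case (the sizes match) the fields alias
 * the array's own stride/dimension buffers and nothing is allocated. */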
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = <dtype>PyArray_DESCR(self)
* cdef int offset
*/
__pyx_v_f = NULL;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = <dtype>PyArray_DESCR(self) # <<<<<<<<<<<<<<
* cdef int offset
*
*/
__pyx_t_7 = PyArray_DESCR(__pyx_v_self);
__pyx_t_3 = ((PyObject *)__pyx_t_7);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300
* cdef int offset
*
* info.obj = self # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(descr):
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
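/* Reference bookkeeping for info->obj: take a new reference to self
 * before dropping whatever info->obj previously held, so the exporting
 * array stays alive for as long as the Py_buffer is outstanding;
 * PyBuffer_Release() eventually drops this reference. */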
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302
* info.obj = self
*
* if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0);
if (__pyx_t_1) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":303
*
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) goto __pyx_L15_next_or;
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (__pyx_t_2) {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L14_bool_binop_done;
}
__pyx_L15_next_or:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (!__pyx_t_2) {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L14_bool_binop_done:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (unlikely(__pyx_t_1)) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":306
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 306, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 306, __pyx_L1_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":304
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
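/* descr.byteorder is a one-character code from the dtype protocol:
 * '>' big-endian, '<' little-endian, '=' native, '|' not applicable.
 * little_endian itself comes from the classic probe (see
 * _util_dtypestring further below), roughly:
 *
 *     int one = 1;
 *     int little_endian = ((char *)&one)[0] != 0;
 *
 * Only native-order data may be exported, hence the ValueError. */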
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
case NPY_UBYTE:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
__pyx_v_f = ((char *)"B");
break;
case NPY_SHORT:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
__pyx_v_f = ((char *)"h");
break;
case NPY_USHORT:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
__pyx_v_f = ((char *)"H");
break;
case NPY_INT:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
__pyx_v_f = ((char *)"i");
break;
case NPY_UINT:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":312
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
__pyx_v_f = ((char *)"I");
break;
case NPY_LONG:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":313
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
__pyx_v_f = ((char *)"l");
break;
case NPY_ULONG:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":314
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
__pyx_v_f = ((char *)"L");
break;
case NPY_LONGLONG:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":315
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
__pyx_v_f = ((char *)"q");
break;
case NPY_ULONGLONG:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":316
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
__pyx_v_f = ((char *)"Q");
break;
case NPY_FLOAT:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":317
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
__pyx_v_f = ((char *)"f");
break;
case NPY_DOUBLE:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":318
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
__pyx_v_f = ((char *)"d");
break;
case NPY_LONGDOUBLE:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":319
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
__pyx_v_f = ((char *)"g");
break;
case NPY_CFLOAT:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":320
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
__pyx_v_f = ((char *)"Zf");
break;
case NPY_CDOUBLE:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":321
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
__pyx_v_f = ((char *)"Zd");
break;
case NPY_CLONGDOUBLE:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":322
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
__pyx_v_f = ((char *)"Zg");
break;
case NPY_OBJECT:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":323
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_v_f = ((char *)"O");
break;
default:
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":325
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 325, __pyx_L1_error)
break;
}
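/* The strings assigned above are struct-module / PEP 3118 format codes,
 * which is what Py_buffer.format expects: e.g. NPY_DOUBLE -> "d",
 * NPY_CFLOAT -> "Zf" (the 'Z' prefix marks a complex pair). A consumer
 * sees exactly these strings via memoryview(arr).format. */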
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":326
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":327
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302
* info.obj = self
*
* if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":329
* return
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
/*else*/ {
__pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* 0xFF == _buffer_format_string_len (255) */
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":330
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":331
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":332
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 332, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":335
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":258
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fulfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
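/* Usage sketch for the exporter above: a minimal buffer consumer. This
 * is illustrative only (not part of the generated module) and assumes a
 * one-dimensional float64 array; the function name is hypothetical. */
static CYTHON_UNUSED double __pyx_example_sum_1d_f64(PyObject *obj) {
  Py_buffer view;
  double total = 0.0;
  Py_ssize_t i;
  /* PyBUF_FORMAT|PyBUF_ND routes through __getbuffer__ and fills
   * view.buf, view.format, view.ndim and view.shape. */
  if (PyObject_GetBuffer(obj, &view, PyBUF_FORMAT | PyBUF_ND) != 0)
    return -1.0; /* exception already set by __getbuffer__ */
  if (view.ndim == 1 && view.format != NULL && strcmp(view.format, "d") == 0) {
    const double *data = (const double *)view.buf;
    for (i = 0; i < view.shape[0]; i++)
      total += data[i];
  }
  /* Triggers __releasebuffer__, freeing any heap-allocated format or
   * strides/shape block. */
  PyBuffer_Release(&view);
  return total;
}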
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":337
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
*/
/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":338
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":339
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* PyObject_Free(info.strides)
*/
PyObject_Free(__pyx_v_info->format);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":338
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":340
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* PyObject_Free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":341
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* PyObject_Free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
PyObject_Free(__pyx_v_info->strides);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":340
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* PyObject_Free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":337
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
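/* Note the symmetry with __getbuffer__: info.format is heap-allocated,
 * and therefore freed here, only for structured dtypes
 * (PyArray_HASFIELDS); info.strides (and the shape stored behind it in
 * the same block) only when sizeof(npy_intp) != sizeof(Py_ssize_t). In
 * every other case these fields alias memory owned by the array itself,
 * and freeing them would be an error. */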
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":821
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 822, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":821
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 825, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":828
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":830
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":831
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":830
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":834
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
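/* The five inline helpers above exist because PyArray_MultiIterNew is a
 * varargs function that Cython cannot call with a dynamic argument
 * count. A hedged sketch of how the resulting broadcast iterator is
 * consumed (illustrative only; assumes both operands are float64
 * arrays, function name hypothetical): */
static CYTHON_UNUSED double __pyx_example_broadcast_dot(PyObject *a, PyObject *b) {
  double total = 0.0;
  PyArrayMultiIterObject *mit =
    (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, ((void *)a), ((void *)b));
  if (mit == NULL)
    return -1.0; /* shapes not broadcastable; exception already set */
  while (PyArray_MultiIter_NOTDONE(mit)) {
    /* PyArray_MultiIter_DATA(mit, i) points at operand i's current
     * element, already broadcast to the common shape. */
    total += (*(double *)PyArray_MultiIter_DATA(mit, 0)) *
             (*(double *)PyArray_MultiIter_DATA(mit, 1));
    PyArray_MultiIter_NEXT(mit);
  }
  Py_DECREF(mit);
  return total;
}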
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":836
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
__pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
if (__pyx_t_1) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape # <<<<<<<<<<<<<<
* else:
* return ()
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
__pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840
* return <tuple>d.subarray.shape
* else:
* return () # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":836
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
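/* Example: for a subarray dtype such as np.dtype(('f8', (2, 3))) this
 * returns the tuple (2, 3); for any plain dtype it returns the empty
 * tuple, so callers can treat both cases uniformly. */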
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842
* return ()
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847
*
* cdef dtype child
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":848
* cdef dtype child
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(1, 851, __pyx_L1_error)
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 851, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 852, __pyx_L1_error)
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 853, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 853, __pyx_L1_error)
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
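/* descr.fields maps each field name to a (child_dtype, byte_offset)
 * pair, so new_offset is the absolute offset of this field within the
 * parent struct and child describes its element type. */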
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (unlikely(__pyx_t_6)) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 856, __pyx_L1_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
}
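/* The constant 15 appears to be a deliberately conservative per-field
 * headroom check: beyond the padding already accounted for in the
 * subtraction, one field appends at most a two-character code such as
 * "Zg" plus the final '\0', so 15 leaves ample margin. */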
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) goto __pyx_L8_next_or;
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (__pyx_t_7) {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (!__pyx_t_7) {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (unlikely(__pyx_t_6)) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 860, __pyx_L1_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":870
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 870, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 870, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 870, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":871
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 0x78;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":872
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
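/* Illustration with a hypothetical padded dtype:
 *     np.dtype({'names': ['a', 'b'], 'formats': ['u1', 'f8'],
 *               'offsets': [0, 8]})
 * emits 'B' for field a, then the loop above pads bytes 1..7 with 'x',
 * then 'd' for field b -- giving "^Bxxxxxxxd" overall. */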
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":875
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":877
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":878
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 878, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":879
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (unlikely(__pyx_t_6)) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":880
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 880, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 880, __pyx_L1_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":879
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":883
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 883, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 883, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 883, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":884
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 884, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 884, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 884, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":885
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 885, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 885, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 885, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x68;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":886
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 886, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 886, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 886, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":887
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 887, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 887, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 887, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x69;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":888
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 888, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 888, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 888, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":889
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 889, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 889, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 889, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x6C;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":890
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 890, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 890, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 890, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":891
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 891, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 891, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 891, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x71;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":892
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 892, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 892, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 892, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":893
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 893, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 893, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 893, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x66;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":894
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 894, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 894, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 894, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x64;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":895
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 895, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 895, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 895, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x67;
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":896
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 896, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 896, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 896, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x66;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":897
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 897, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 897, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 897, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x64;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":898
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 898, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 898, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 898, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x67;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":899
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 899, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 899, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 899, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (likely(__pyx_t_6)) {
(__pyx_v_f[0]) = 79; /* 'O' */
goto __pyx_L15;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":901
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 901, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 901, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 901, __pyx_L1_error)
}
__pyx_L15:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":902
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":877
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
goto __pyx_L13;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":906
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
/*else*/ {
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 906, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":907
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842
* return ()
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
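/* The branch chain in _util_dtypestring maps NumPy type enums to the
 * struct-module format codes it writes into the caller's buffer.  The
 * bare numeric literals are ASCII codes, emitted interchangeably in
 * decimal and hex by the code generator:
 *
 *     0x67 = 103 = 'g'   long double
 *     90         = 'Z'   complex prefix
 *     0x66 = 102 = 'f'   float
 *     0x64 = 100 = 'd'   double
 *     79         = 'O'   object
 *
 * so NPY_CFLOAT emits "Zf", NPY_CDOUBLE "Zd" and NPY_CLONGDOUBLE "Zg",
 * advancing f one extra slot for the two-character codes. */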
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023
*
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<<
* PyArray_SetBaseObject(arr, base)
*
*/
Py_INCREF(__pyx_v_base);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
(void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
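/* PyArray_SetBaseObject steals a reference to its second argument,
 * which is why the wrapper takes Py_INCREF(base) first: after the call
 * the array owns one reference and the caller still holds its own.  A
 * minimal sketch of the same pattern in hand-written C (hypothetical
 * `arr` and `owner`, error handling elided):
 *
 *     Py_INCREF(owner);                          // reference to be stolen
 *     (void)PyArray_SetBaseObject(arr, owner);   // arr now owns it
 *
 * The return value is discarded here because the Cython declaration
 * types set_array_base as returning void. */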
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1026
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_v_base;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1027
*
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr) # <<<<<<<<<<<<<<
* if base is NULL:
* return None
*/
__pyx_v_base = PyArray_BASE(__pyx_v_arr);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1028
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
__pyx_t_1 = ((__pyx_v_base == NULL) != 0);
if (__pyx_t_1) {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1029
* base = PyArray_BASE(arr)
* if base is NULL:
* return None # <<<<<<<<<<<<<<
* return <object>base
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1028
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1030
* if base is NULL:
* return None
* return <object>base # <<<<<<<<<<<<<<
*
* # Versions of the import_* functions which are more suitable for
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_base));
__pyx_r = ((PyObject *)__pyx_v_base);
goto __pyx_L0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1026
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
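/* The inverse accessor: PyArray_BASE returns a borrowed reference, or
 * NULL when the array owns its data outright.  The wrapper therefore
 * maps NULL to None and INCREFs before handing the base out, so the
 * caller always receives a new reference regardless of which branch
 * was taken. */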
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1036
* cdef inline int import_array() except -1:
* try:
* _import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1036, __pyx_L3_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1037
* try:
* _import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)PyExc_Exception));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1037, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1038
* _import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1038, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1038, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1035
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1034
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
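/* _import_array() is the NumPy C-API bootstrap behind the
 * import_array() macro from <numpy/arrayobject.h>; it returns -1 with
 * an exception set if the API capsule cannot be loaded, and the wrapper
 * above re-raises that as an ImportError.  A minimal sketch of the same
 * check in a hand-written extension's init function:
 *
 *     if (_import_array() < 0) {
 *         PyErr_SetString(PyExc_ImportError,
 *                         "numpy.core.multiarray failed to import");
 *         return NULL;
 *     }
 */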
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1042
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1042, __pyx_L3_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1043
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)PyExc_Exception));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1043, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1044
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1044, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1044, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1041
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1040
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1048
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1048, __pyx_L3_error)
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1049
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)PyExc_Exception));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1049, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1050
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1050, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1050, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1047
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
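/* Note that import_umath() and import_ufunc() compile to identical
 * bodies: the numpy/__init__.pxd source for import_ufunc also calls
 * _import_umath(), since the ufunc C API is exported by the umath
 * module.  Only the name recorded in the traceback differs
 * ("numpy.import_umath" vs "numpy.import_ufunc"). */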
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec__vet(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec__vet},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"_vet",
__pyx_k_Cython_module_for_morphing_and, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
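/* Under PEP 489 multi-phase initialisation the module is described
 * declaratively: Py_mod_create builds the module object from the import
 * spec and Py_mod_exec runs the module body, while m_size = 0 requests
 * no per-module state (this module keeps its state in C globals
 * instead, hence the single-interpreter guard further down).  A minimal
 * sketch of the same shape for a hand-written module (hypothetical
 * names):
 *
 *     static PyModuleDef_Slot slots[] = {
 *         {Py_mod_exec, (void *)mymod_exec},   // runs the module body
 *         {0, NULL}
 *     };
 *     static struct PyModuleDef def = {
 *         PyModuleDef_HEAD_INIT, "mymod", NULL, 0,
 *         NULL, slots, NULL, NULL, NULL
 *     };
 *     PyMODINIT_FUNC PyInit_mymod(void) { return PyModuleDef_Init(&def); }
 */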
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_u_Error_computing_cost_function, __pyx_k_Error_computing_cost_function, sizeof(__pyx_k_Error_computing_cost_function), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
{&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
{&__pyx_kp_u_The_number_of_sectors_in_x_axis, __pyx_k_The_number_of_sectors_in_x_axis, sizeof(__pyx_k_The_number_of_sectors_in_x_axis), 0, 1, 0, 0},
{&__pyx_kp_u_The_number_of_sectors_in_y_axis, __pyx_k_The_number_of_sectors_in_y_axis, sizeof(__pyx_k_The_number_of_sectors_in_y_axis), 0, 1, 0, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1},
{&__pyx_n_s_axis, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1},
{&__pyx_n_s_buffer, __pyx_k_buffer, sizeof(__pyx_k_buffer), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_cost_function, __pyx_k_cost_function, sizeof(__pyx_k_cost_function), 0, 0, 1, 1},
{&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1},
{&__pyx_n_s_df_dx2, __pyx_k_df_dx2, sizeof(__pyx_k_df_dx2), 0, 0, 1, 1},
{&__pyx_n_s_df_dxdy, __pyx_k_df_dxdy, sizeof(__pyx_k_df_dxdy), 0, 0, 1, 1},
{&__pyx_n_s_df_dy2, __pyx_k_df_dy2, sizeof(__pyx_k_df_dy2), 0, 0, 1, 1},
{&__pyx_n_s_displacement, __pyx_k_displacement, sizeof(__pyx_k_displacement), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_dx, __pyx_k_dx, sizeof(__pyx_k_dx), 0, 0, 1, 1},
{&__pyx_n_s_dy, __pyx_k_dy, sizeof(__pyx_k_dy), 0, 0, 1, 1},
{&__pyx_n_s_f00, __pyx_k_f00, sizeof(__pyx_k_f00), 0, 0, 1, 1},
{&__pyx_n_s_f01, __pyx_k_f01, sizeof(__pyx_k_f01), 0, 0, 1, 1},
{&__pyx_n_s_f10, __pyx_k_f10, sizeof(__pyx_k_f10), 0, 0, 1, 1},
{&__pyx_n_s_f11, __pyx_k_f11, sizeof(__pyx_k_f11), 0, 0, 1, 1},
{&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1},
{&__pyx_n_u_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 1, 0, 1},
{&__pyx_n_s_full, __pyx_k_full, sizeof(__pyx_k_full), 0, 0, 1, 1},
{&__pyx_n_s_grad_residuals, __pyx_k_grad_residuals, sizeof(__pyx_k_grad_residuals), 0, 0, 1, 1},
{&__pyx_n_s_grad_smooth, __pyx_k_grad_smooth, sizeof(__pyx_k_grad_smooth), 0, 0, 1, 1},
{&__pyx_n_s_gradient, __pyx_k_gradient, sizeof(__pyx_k_gradient), 0, 0, 1, 1},
{&__pyx_n_s_gradient_data, __pyx_k_gradient_data, sizeof(__pyx_k_gradient_data), 0, 0, 1, 1},
{&__pyx_n_s_gradient_values, __pyx_k_gradient_values, sizeof(__pyx_k_gradient_values), 0, 0, 1, 1},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_i_max, __pyx_k_i_max, sizeof(__pyx_k_i_max), 0, 0, 1, 1},
{&__pyx_n_s_i_min, __pyx_k_i_min, sizeof(__pyx_k_i_min), 0, 0, 1, 1},
{&__pyx_n_s_i_sec, __pyx_k_i_sec, sizeof(__pyx_k_i_sec), 0, 0, 1, 1},
{&__pyx_n_s_i_shift, __pyx_k_i_shift, sizeof(__pyx_k_i_shift), 0, 0, 1, 1},
{&__pyx_n_s_image, __pyx_k_image, sizeof(__pyx_k_image), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_inloop_smoothness_penalty, __pyx_k_inloop_smoothness_penalty, sizeof(__pyx_k_inloop_smoothness_penalty), 0, 0, 1, 1},
{&__pyx_n_s_input_image, __pyx_k_input_image, sizeof(__pyx_k_input_image), 0, 0, 1, 1},
{&__pyx_n_s_int8, __pyx_k_int8, sizeof(__pyx_k_int8), 0, 0, 1, 1},
{&__pyx_n_s_interp_coef, __pyx_k_interp_coef, sizeof(__pyx_k_interp_coef), 0, 0, 1, 1},
{&__pyx_n_s_intp, __pyx_k_intp, sizeof(__pyx_k_intp), 0, 0, 1, 1},
{&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
{&__pyx_n_s_j_max, __pyx_k_j_max, sizeof(__pyx_k_j_max), 0, 0, 1, 1},
{&__pyx_n_s_j_min, __pyx_k_j_min, sizeof(__pyx_k_j_min), 0, 0, 1, 1},
{&__pyx_n_s_j_sec, __pyx_k_j_sec, sizeof(__pyx_k_j_sec), 0, 0, 1, 1},
{&__pyx_n_s_j_shift, __pyx_k_j_shift, sizeof(__pyx_k_j_shift), 0, 0, 1, 1},
{&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1},
{&__pyx_n_s_l0, __pyx_k_l0, sizeof(__pyx_k_l0), 0, 0, 1, 1},
{&__pyx_n_s_l1, __pyx_k_l1, sizeof(__pyx_k_l1), 0, 0, 1, 1},
{&__pyx_n_s_l_i, __pyx_k_l_i, sizeof(__pyx_k_l_i), 0, 0, 1, 1},
{&__pyx_n_s_ll, __pyx_k_ll, sizeof(__pyx_k_ll), 0, 0, 1, 1},
{&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1},
{&__pyx_n_s_m0, __pyx_k_m0, sizeof(__pyx_k_m0), 0, 0, 1, 1},
{&__pyx_n_s_m1, __pyx_k_m1, sizeof(__pyx_k_m1), 0, 0, 1, 1},
{&__pyx_n_s_m_j, __pyx_k_m_j, sizeof(__pyx_k_m_j), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1},
{&__pyx_n_s_mean, __pyx_k_mean, sizeof(__pyx_k_mean), 0, 0, 1, 1},
{&__pyx_n_s_mm, __pyx_k_mm, sizeof(__pyx_k_mm), 0, 0, 1, 1},
{&__pyx_n_s_morph_mask, __pyx_k_morph_mask, sizeof(__pyx_k_morph_mask), 0, 0, 1, 1},
{&__pyx_n_s_morphed_image, __pyx_k_morphed_image, sizeof(__pyx_k_morphed_image), 0, 0, 1, 1},
{&__pyx_n_s_morphed_mask, __pyx_k_morphed_mask, sizeof(__pyx_k_morphed_mask), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
{&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
{&__pyx_n_s_new_image, __pyx_k_new_image, sizeof(__pyx_k_new_image), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0},
{&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0},
{&__pyx_n_s_nx, __pyx_k_nx, sizeof(__pyx_k_nx), 0, 0, 1, 1},
{&__pyx_n_s_ny, __pyx_k_ny, sizeof(__pyx_k_ny), 0, 0, 1, 1},
{&__pyx_n_s_pysteps_motion__vet, __pyx_k_pysteps_motion__vet, sizeof(__pyx_k_pysteps_motion__vet), 0, 0, 1, 1},
{&__pyx_kp_s_pysteps_motion__vet_pyx, __pyx_k_pysteps_motion__vet_pyx, sizeof(__pyx_k_pysteps_motion__vet_pyx), 0, 0, 1, 0},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reshape, __pyx_k_reshape, sizeof(__pyx_k_reshape), 0, 0, 1, 1},
{&__pyx_n_s_residuals, __pyx_k_residuals, sizeof(__pyx_k_residuals), 0, 0, 1, 1},
{&__pyx_n_s_return_counts, __pyx_k_return_counts, sizeof(__pyx_k_return_counts), 0, 0, 1, 1},
{&__pyx_n_s_return_index, __pyx_k_return_index, sizeof(__pyx_k_return_index), 0, 0, 1, 1},
{&__pyx_n_s_sector_area, __pyx_k_sector_area, sizeof(__pyx_k_sector_area), 0, 0, 1, 1},
{&__pyx_n_s_sector_displacement, __pyx_k_sector_displacement, sizeof(__pyx_k_sector_displacement), 0, 0, 1, 1},
{&__pyx_n_s_smooth_gain, __pyx_k_smooth_gain, sizeof(__pyx_k_smooth_gain), 0, 0, 1, 1},
{&__pyx_n_s_smoothness_penalty, __pyx_k_smoothness_penalty, sizeof(__pyx_k_smoothness_penalty), 0, 0, 1, 1},
{&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1},
{&__pyx_n_s_template_image, __pyx_k_template_image, sizeof(__pyx_k_template_image), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_unique, __pyx_k_unique, sizeof(__pyx_k_unique), 0, 0, 1, 1},
{&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
{&__pyx_n_s_warp, __pyx_k_warp, sizeof(__pyx_k_warp), 0, 0, 1, 1},
{&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1},
{&__pyx_n_s_x_ceil, __pyx_k_x_ceil, sizeof(__pyx_k_x_ceil), 0, 0, 1, 1},
{&__pyx_n_s_x_float, __pyx_k_x_float, sizeof(__pyx_k_x_float), 0, 0, 1, 1},
{&__pyx_n_s_x_floor, __pyx_k_x_floor, sizeof(__pyx_k_x_floor), 0, 0, 1, 1},
{&__pyx_n_s_x_guess, __pyx_k_x_guess, sizeof(__pyx_k_x_guess), 0, 0, 1, 1},
{&__pyx_n_s_x_image_size, __pyx_k_x_image_size, sizeof(__pyx_k_x_image_size), 0, 0, 1, 1},
{&__pyx_n_s_x_max_float, __pyx_k_x_max_float, sizeof(__pyx_k_x_max_float), 0, 0, 1, 1},
{&__pyx_n_s_x_max_int, __pyx_k_x_max_int, sizeof(__pyx_k_x_max_int), 0, 0, 1, 1},
{&__pyx_n_s_x_sector_size, __pyx_k_x_sector_size, sizeof(__pyx_k_x_sector_size), 0, 0, 1, 1},
{&__pyx_n_s_x_sectors, __pyx_k_x_sectors, sizeof(__pyx_k_x_sectors), 0, 0, 1, 1},
{&__pyx_n_s_xy, __pyx_k_xy, sizeof(__pyx_k_xy), 0, 0, 1, 1},
{&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1},
{&__pyx_n_s_y_ceil, __pyx_k_y_ceil, sizeof(__pyx_k_y_ceil), 0, 0, 1, 1},
{&__pyx_n_s_y_float, __pyx_k_y_float, sizeof(__pyx_k_y_float), 0, 0, 1, 1},
{&__pyx_n_s_y_floor, __pyx_k_y_floor, sizeof(__pyx_k_y_floor), 0, 0, 1, 1},
{&__pyx_n_s_y_guess, __pyx_k_y_guess, sizeof(__pyx_k_y_guess), 0, 0, 1, 1},
{&__pyx_n_s_y_image_size, __pyx_k_y_image_size, sizeof(__pyx_k_y_image_size), 0, 0, 1, 1},
{&__pyx_n_s_y_max_float, __pyx_k_y_max_float, sizeof(__pyx_k_y_max_float), 0, 0, 1, 1},
{&__pyx_n_s_y_max_int, __pyx_k_y_max_int, sizeof(__pyx_k_y_max_int), 0, 0, 1, 1},
{&__pyx_n_s_y_sector_size, __pyx_k_y_sector_size, sizeof(__pyx_k_y_sector_size), 0, 0, 1, 1},
{&__pyx_n_s_y_sectors, __pyx_k_y_sectors, sizeof(__pyx_k_y_sectors), 0, 0, 1, 1},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{&__pyx_n_s_zip, __pyx_k_zip, sizeof(__pyx_k_zip), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
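/* Each __Pyx_StringTabEntry is {target pointer, C literal, byte length,
 * encoding, is_unicode, is_str, intern}.  The naming convention of the
 * targets encodes the same flags: __pyx_n_s_* are interned str
 * identifiers (is_str=1, intern=1), __pyx_n_u_* interned unicode
 * identifiers, and __pyx_kp_s_*/__pyx_kp_u_* plain non-interned str and
 * unicode constants such as the error messages above. */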
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 163, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 357, __pyx_L1_error)
__pyx_builtin_zip = __Pyx_GetBuiltinName(__pyx_n_s_zip); if (!__pyx_builtin_zip) __PYX_ERR(0, 464, __pyx_L1_error)
__pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 856, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1038, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "pysteps/motion/_vet.pyx":357
*
* if x_image_size % x_sectors != 0:
* raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<<
* "The number of sectors in x axis (axis=0)"
* + " don't divide the image size")
*/
__pyx_tuple_ = PyTuple_Pack(2, __pyx_kp_u_Error_computing_cost_function, __pyx_kp_u_The_number_of_sectors_in_x_axis); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 357, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "pysteps/motion/_vet.pyx":362
*
* if y_image_size % y_sectors != 0:
* raise ValueError("Error computing cost function.\n", # <<<<<<<<<<<<<<
* "The number of sectors in y axis (axis=1) don't"
* + " divide the image size")
*/
__pyx_tuple__2 = PyTuple_Pack(2, __pyx_kp_u_Error_computing_cost_function, __pyx_kp_u_The_number_of_sectors_in_y_axis); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 362, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "pysteps/motion/_vet.pyx":504
* buffer[morph_mask == 1] = 0
*
* _gradient_data[0, :] *= buffer # <<<<<<<<<<<<<<
* _gradient_data[1, :] *= buffer
*
*/
__pyx_slice__3 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__3);
__Pyx_GIVEREF(__pyx_slice__3);
__pyx_tuple__4 = PyTuple_Pack(2, __pyx_int_0, __pyx_slice__3); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "pysteps/motion/_vet.pyx":505
*
* _gradient_data[0, :] *= buffer
* _gradient_data[1, :] *= buffer # <<<<<<<<<<<<<<
*
* for l in range(x_sectors): # schedule='dynamic', nogil=True):
*/
__pyx_tuple__5 = PyTuple_Pack(2, __pyx_int_1, __pyx_slice__3); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 505, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 272, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 276, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":306
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 306, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 856, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":880
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 880, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1038
* _import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 1038, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1044
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "pysteps/motion/_vet.pyx":67
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<<
* np.ndarray[int8, ndim=2] mask,
* np.ndarray[float64, ndim=3] displacement,
*/
__pyx_tuple__13 = PyTuple_Pack(27, __pyx_n_s_image, __pyx_n_s_mask, __pyx_n_s_displacement, __pyx_n_s_gradient, __pyx_n_s_nx, __pyx_n_s_ny, __pyx_n_s_new_image, __pyx_n_s_morphed_mask, __pyx_n_s_gradient_values, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_x_max_int, __pyx_n_s_y_max_int, __pyx_n_s_x_max_float, __pyx_n_s_y_max_float, __pyx_n_s_x_float, __pyx_n_s_y_float, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_x_floor, __pyx_n_s_x_ceil, __pyx_n_s_y_floor, __pyx_n_s_y_ceil, __pyx_n_s_f00, __pyx_n_s_f10, __pyx_n_s_f01, __pyx_n_s_f11); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
__pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(4, 0, 27, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pysteps_motion__vet_pyx, __pyx_n_s_warp, 67, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 67, __pyx_L1_error)
/* "pysteps/motion/_vet.pyx":240
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<<
* np.ndarray[float64, ndim=2] template_image,
* np.ndarray[float64, ndim=2] input_image,
*/
__pyx_tuple__15 = PyTuple_Pack(54, __pyx_n_s_sector_displacement, __pyx_n_s_template_image, __pyx_n_s_input_image, __pyx_n_s_mask, __pyx_n_s_smooth_gain, __pyx_n_s_gradient, __pyx_n_s_x_sectors, __pyx_n_s_y_sectors, __pyx_n_s_x_image_size, __pyx_n_s_y_image_size, __pyx_n_s_x_sector_size, __pyx_n_s_y_sector_size, __pyx_n_s_displacement, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_xy, __pyx_n_s_l, __pyx_n_s_m, __pyx_n_s_ll, __pyx_n_s_mm, __pyx_n_s_i_sec, __pyx_n_s_j_sec, __pyx_n_s_l0, __pyx_n_s_m0, __pyx_n_s_l1, __pyx_n_s_m1, __pyx_n_s_i_shift, __pyx_n_s_j_shift, __pyx_n_s_axis, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_x_guess, __pyx_n_s_y_guess, __pyx_n_s_sector_area, __pyx_n_s_interp_coef, __pyx_n_s_l_i, __pyx_n_s_m_j, __pyx_n_s_i_min, __pyx_n_s_i_max, __pyx_n_s_j_min, __pyx_n_s_j_max, __pyx_n_s_counts, __pyx_n_s_morphed_image, __pyx_n_s_morph_mask, __pyx_n_s_gradient_data, __pyx_n_s_grad_residuals, __pyx_n_s_grad_smooth, __pyx_n_s_buffer, __pyx_n_s_residuals, __pyx_n_s_smoothness_penalty, __pyx_n_s_df_dx2, __pyx_n_s_df_dxdy, __pyx_n_s_df_dy2, __pyx_n_s_inloop_smoothness_penalty); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
__pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(6, 0, 54, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pysteps_motion__vet_pyx, __pyx_n_s_cost_function, 240, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 240, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
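/* The cached constants created here are module-lifetime singletons:
 * PyInt_FromLong (mapped to PyLong_FromLong on Python 3 by Cython's
 * compatibility defines) hands back CPython's shared small-int objects
 * for 0, 1, 2 and 4, so later uses of these literals are plain INCREFs
 * instead of repeated boxing. */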
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 206, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 206, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 229, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 233, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 242, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 918, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC init_vet(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC init_vet(void)
#else
__Pyx_PyMODINIT_FUNC PyInit__vet(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit__vet(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
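/* The guard above exists because this module keeps its state in C
 * globals (__pyx_m, __pyx_d, the cached constants), which cannot be
 * duplicated per sub-interpreter: the first interpreter to import the
 * module claims it, and any later interpreter in the same process gets
 * the ImportError instead. */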
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec__vet(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module '_vet' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__vet(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("_vet", __pyx_methods, __pyx_k_Cython_module_for_morphing_and, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_pysteps__motion___vet) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "pysteps.motion._vet")) {
if (unlikely(PyDict_SetItemString(modules, "pysteps.motion._vet", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error;
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error;
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
(void)__Pyx_modinit_type_init_code();
if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error;
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "pysteps/motion/_vet.pyx":8
* """
* from cython.parallel import prange, parallel
* import numpy as np # <<<<<<<<<<<<<<
*
* cimport cython
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "pysteps/motion/_vet.pyx":67
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _warp(np.ndarray[float64, ndim=2] image, # <<<<<<<<<<<<<<
* np.ndarray[int8, ndim=2] mask,
* np.ndarray[float64, ndim=3] displacement,
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7pysteps_6motion_4_vet_1_warp, NULL, __pyx_n_s_pysteps_motion__vet); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_warp, __pyx_t_1) < 0) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "pysteps/motion/_vet.pyx":240
* @cython.nonecheck(False)
* @cython.cdivision(True)
* def _cost_function(np.ndarray[float64, ndim=3] sector_displacement, # <<<<<<<<<<<<<<
* np.ndarray[float64, ndim=2] template_image,
* np.ndarray[float64, ndim=2] input_image,
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7pysteps_6motion_4_vet_3_cost_function, NULL, __pyx_n_s_pysteps_motion__vet); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_cost_function, __pyx_t_1) < 0) __PYX_ERR(0, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "pysteps/motion/_vet.pyx":1
* # -*- coding: utf-8 -*- # <<<<<<<<<<<<<<
*
* """
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../pysteps_env/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1046
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init pysteps.motion._vet", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init pysteps.motion._vet");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
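/* Three return conventions are visible at the end of the init function:
 * under multi-phase init the exec slot reports success as 0 and failure
 * as -1; single-phase Python 3 returns the module object or NULL; and
 * Python 2's initXXX returns void, signalling failure only through the
 * pending exception.  The #if ladder above selects the matching form. */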
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
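/* Run-time endianness probe: storing 0x01020304 through the u32 member and
   reading back u8[0] yields the lowest-addressed byte, which is 4 (the
   least significant byte) on a little-endian machine and 1 on a big-endian
   one, hence the "== 4" test. */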
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct, aligning
   on the first member of the struct. This will probably be the same as
   above, but we have no guarantee.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
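/* Probe-struct idiom: for a type T, sizeof(struct {char c; T x;}) minus
   sizeof(T) is the offset the compiler gives x after a one-byte member,
   i.e. T's alignment (the __Pyx_st_* family above); the mirrored
   __Pyx_pad_* family measures the padding added after a trailing char to
   round the struct up to T's alignment, which is what the buffer parser
   uses when closing a 'T{...}' struct. */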
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace; advance, or the loop never terminates */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
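/* __Pyx_BufFmt_CheckString validates a PEP 3118 (struct-module style)
   format string against the compile-time __Pyx_TypeInfo tree. Examples of
   accepted inputs: "<f" (one little-endian float), "2d" (two native
   doubles), "Zf" (a complex float), "T{i:x:d:y:}" (a struct with int x
   and double y), and "(2,3)d" (a 2x3 array of doubles, handled by
   __pyx_buffmt_parse_array above). */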
/* BufferGetAndValidate */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (unlikely(info->buf == NULL)) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
static void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
static int __Pyx__GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
buf->buf = NULL;
if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) {
__Pyx_ZeroBuffer(buf);
return -1;
}
if (unlikely(buf->ndim != nd)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if (unlikely((size_t)buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_SafeReleaseBuffer(buf);
return -1;
}
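/* __Pyx__GetBufferAndValidate acquires a buffer via the buffer protocol
   and then checks it against the expected dtype: the dimension count, the
   format string (skipped when the buffer access was declared with a cast),
   and the item size must all match, otherwise the buffer is released and
   -1 returned. Substituting __Pyx_minusones for a NULL suboffsets pointer
   lets the indexing code assume suboffsets is always readable. */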
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
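/* PEP 509 dict versioning: CPython 3.6+ gives every dict a 64-bit
   ma_version_tag that changes on mutation. Caching the tag of a module's
   or type's dict lets the generated code reuse a previously looked-up
   value for as long as the tags still match, skipping the hash lookup. */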
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
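/* Module-global lookup order: the module dict __pyx_d is searched first
   (via _PyDict_GetItem_KnownHash where available, reusing the interned
   string's precomputed hash), and only on a miss does the code fall back
   to __Pyx_GetBuiltinName, mirroring Python's module-then-builtins
   scoping for global names. */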
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* BufferFallbackError */
static void __Pyx_RaiseBufferFallbackError(void) {
PyErr_SetString(PyExc_ValueError,
"Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use the default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
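/* The fast-call paths above avoid building an argument tuple: a plain
   Python function whose code object uses no defaults, keyword-only
   arguments, closures or free variables (CO_OPTIMIZED | CO_NEWLOCALS |
   CO_NOFREE) is executed by copying the C argument array straight into a
   fresh frame's fastlocals; anything more complicated goes through
   PyEval_EvalCodeEx, and non-function callables use the tp_call slot. */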
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* IterFinish */
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* exc_type = tstate->curexc_type;
if (unlikely(exc_type)) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
PyObject *exc_value, *exc_tb;
exc_value = tstate->curexc_value;
exc_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
Py_DECREF(exc_type);
Py_XDECREF(exc_value);
Py_XDECREF(exc_tb);
return 0;
} else {
return -1;
}
}
return 0;
#else
if (unlikely(PyErr_Occurred())) {
if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
PyErr_Clear();
return 0;
} else {
return -1;
}
}
return 0;
#endif
}
/* UnpackItemEndCheck */
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
if (unlikely(retval)) {
Py_DECREF(retval);
__Pyx_RaiseTooManyValuesError(expected);
return -1;
} else {
return __Pyx_IterFinish();
}
}
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a - b);
if (likely((x^a) >= 0 || (x^~b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_subtract(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2);
}
}
x = a - b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla - llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("subtract", return NULL)
result = ((double)a) - (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2);
}
#endif
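/* Fast path for int - constant: CPython stores integers sign-magnitude as
   PyLong_SHIFT-bit digits (15 or 30 bits each) with the sign carried by
   Py_SIZE. The switch above reassembles operands of up to four digits into
   a C long (or long long) whenever that cannot overflow, e.g. for
   Py_SIZE == -2 the value is -((digits[1] << PyLong_SHIFT) | digits[0]),
   and only larger operands fall back to the generic nb_subtract slot. */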
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* DictGetItem */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred()) {
if (unlikely(PyTuple_Check(key))) {
PyObject* args = PyTuple_Pack(1, key);
if (likely(args)) {
PyErr_SetObject(PyExc_KeyError, args);
Py_DECREF(args);
}
} else {
PyErr_SetObject(PyExc_KeyError, key);
}
}
return NULL;
}
Py_INCREF(value);
return value;
}
#endif
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
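/* The code-object cache is kept sorted by code_line, so this helper is a
   standard binary search returning either the matching slot or the
   insertion point; lookups in the traceback path stay O(log n), with the
   linear shift cost paid only on insertion below. */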
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
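/* __Pyx_AddTraceback fabricates a Python-level frame for a failure that
   happened in C: it synthesizes an empty code object carrying the .pyx
   filename, function name and line number (cached under negative keys for
   C-line variants), wraps it in a fresh frame, sets the frame's line and
   hands it to PyTraceBack_Here so the .pyx source location appears in the
   traceback. */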
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view);
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* CIntToPy */
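/* Strategy: use the cheapest exact CPython constructor whose range covers
   the C type (PyInt_FromLong, PyLong_FromUnsignedLong(Long), ...); if none
   is wide enough, fall back to _PyLong_FromByteArray on the raw bytes. */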
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_Py_intptr_t(Py_intptr_t value) {
const Py_intptr_t neg_one = (Py_intptr_t) ((Py_intptr_t) 0 - (Py_intptr_t) 1), const_zero = (Py_intptr_t) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(Py_intptr_t) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(Py_intptr_t) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
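/* Fallback: probe endianness at runtime and hand the raw bytes of the
   value to _PyLong_FromByteArray. */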
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t),
little, !is_unsigned);
}
}
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
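/* __PYX__VERIFY_RETURN_INT checks that casting func_value to target_type
   round-trips losslessly; on failure it jumps to the raise_overflow /
   raise_neg_overflow labels of the enclosing function, and with exc set it
   first propagates a pending Python exception signalled by -1. */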
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
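/* Complex division scaled by the larger-magnitude component of the
   divisor (Smith's algorithm); unlike the naive formula in the #else
   branch below, this avoids spurious intermediate overflow/underflow. */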
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = (float)(1.0) / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = (float)(1.0) / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
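/* Integer exponents with |b.real| <= 4 are expanded into repeated
   multiplications (taking a reciprocal first for negative powers); the
   general case goes through polar form: a^b = exp(b * log(a)). */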
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_float(a, a);
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2f(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = (double)(1.0) / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = (double)(1.0) / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_double(a, a);
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(enum NPY_TYPES) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
little, !is_unsigned);
}
}
/* CIntFromPy */
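/* With CYTHON_USE_PYLONG_INTERNALS, small PyLongs (one to four
   PyLong_SHIFT-bit digits) are decoded by reading ob_digit directly;
   anything larger falls back to PyLong_As* or a byte-array copy. */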
static CYTHON_INLINE Py_intptr_t __Pyx_PyInt_As_Py_intptr_t(PyObject *x) {
const Py_intptr_t neg_one = (Py_intptr_t) ((Py_intptr_t) 0 - (Py_intptr_t) 1), const_zero = (Py_intptr_t) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(Py_intptr_t) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (Py_intptr_t) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (Py_intptr_t) 0;
case 1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, digits[0])
case 2:
if (8 * sizeof(Py_intptr_t) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) >= 2 * PyLong_SHIFT) {
return (Py_intptr_t) (((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(Py_intptr_t) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) >= 3 * PyLong_SHIFT) {
return (Py_intptr_t) (((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(Py_intptr_t) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) >= 4 * PyLong_SHIFT) {
return (Py_intptr_t) (((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (Py_intptr_t) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(Py_intptr_t) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(Py_intptr_t) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (Py_intptr_t) 0;
case -1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(Py_intptr_t, digit, +digits[0])
case -2:
if (8 * sizeof(Py_intptr_t) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) {
return (Py_intptr_t) (((Py_intptr_t)-1)*(((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(Py_intptr_t) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) {
return (Py_intptr_t) ((((((Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(Py_intptr_t) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) {
return (Py_intptr_t) (((Py_intptr_t)-1)*(((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(Py_intptr_t) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) {
return (Py_intptr_t) ((((((((Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(Py_intptr_t) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) - 1 > 4 * PyLong_SHIFT) {
return (Py_intptr_t) (((Py_intptr_t)-1)*(((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(Py_intptr_t) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(Py_intptr_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(Py_intptr_t) - 1 > 4 * PyLong_SHIFT) {
return (Py_intptr_t) ((((((((((Py_intptr_t)digits[3]) << PyLong_SHIFT) | (Py_intptr_t)digits[2]) << PyLong_SHIFT) | (Py_intptr_t)digits[1]) << PyLong_SHIFT) | (Py_intptr_t)digits[0])));
}
}
break;
}
#endif
if (sizeof(Py_intptr_t) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(Py_intptr_t) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(Py_intptr_t, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
Py_intptr_t val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (Py_intptr_t) -1;
}
} else {
Py_intptr_t val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (Py_intptr_t) -1;
val = __Pyx_PyInt_As_Py_intptr_t(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to Py_intptr_t");
return (Py_intptr_t) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to Py_intptr_t");
return (Py_intptr_t) -1;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* FastTypeChecks */
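/* Fast subtype checks that read tp_mro / tp_base directly instead of
   going through the generic subclass protocol. Note that __Pyx_InBases
   advances a before comparing, so the a == b case is handled by its
   caller, __Pyx_IsSubtype. */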
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
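/* Note: the 4-byte buffers below truncate two-digit minor versions
   ("3.10" becomes "3.1"), a limitation of this generated check that was
   fixed in later Cython releases. */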
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
IndexHNSWlib.h | #pragma once
#include <faiss/Index.h>
#include <faiss/hnswlib/hnswlib.h>
#include <faiss/impl/FaissAssert.h>
namespace faiss {
struct IndexHNSWlib : Index {
float scale;
float bias;
IndexHNSWlib(size_t d, MetricType metric) : Index(d, metric), scale(1.0f), bias(0.0f) {
}
virtual ~IndexHNSWlib() {
}
virtual void setEFConstruction(size_t ef) = 0;
virtual void setEFSearch(size_t ef) = 0;
virtual size_t getEfConstruction() = 0;
virtual size_t getEfSearch() = 0;
virtual void save(FILE* file) const = 0;
template <typename Tdist, typename Tcorr>
static hnswlib::SpaceInterface<Tdist>* createSpace(size_t d, MetricType metric) {
if(metric == METRIC_INNER_PRODUCT) {
return new hnswlib::InnerProductSpace<Tdist, Tcorr>(d);
}
else if(metric == METRIC_L2) {
return new hnswlib::L2Space<Tdist, Tcorr>(d);
}
else {
FAISS_THROW_FMT("unsupported metric: %d", metric);
}
}
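// Vectors are mapped into the storage type Tcorr via the affine transform
// x * scale + bias before being handed to hnswlib; deconvertVector below
// inverts that mapping. The float overloads further down pass the data
// through unchanged.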
template <typename T>
const T* convertVector(size_t d, const float* x, T) const {
T* tx = new T [d];
for(size_t i = 0; i < d; i++) {
tx[i] = T(x[i] * scale + bias);
}
return tx;
}
template <typename T>
void deconvertVector(size_t d, const T* x, float* fx) const {
for(size_t i = 0; i < d; i++) {
fx[i] = ((float)x[i] - bias) / scale;
}
}
template <typename T>
inline void deleteConvertedVector(const T* tx) const {
delete[] tx;
}
inline const float* convertVector(size_t, const float* x, float) const {
return x;
}
// A float fast path for deconvertVector (returning the input unchanged)
// was disabled here: assigning to the fx parameter would not have copied
// any data. The generic template above handles T == float correctly (an
// identity mapping when scale == 1 and bias == 0).
inline void deleteConvertedVector(const float*) const {
}
};
template <typename Tdist, typename Tcorr>
struct IndexHNSWlibImpl : IndexHNSWlib {
const size_t INIT_MAX_ELEMENTS = 1UL << 20;
hnswlib::HierarchicalNSW<Tdist>* hnsw;
IndexHNSWlibImpl(size_t d, size_t M, MetricType metric = METRIC_L2) :
IndexHNSWlib(d, metric) {
hnsw = new hnswlib::HierarchicalNSW<Tdist>(createSpace<Tdist, Tcorr>(d, metric),
new hnswlib::VmemLevel0, INIT_MAX_ELEMENTS, M);
}
IndexHNSWlibImpl(size_t d, FILE* file, MetricType metric = METRIC_L2) :
IndexHNSWlib (d, metric) {
hnswlib::Level0StorageInterface* storage;
char* env_pmem = getenv("USE_PMEM");
if(env_pmem && strcmp(env_pmem, "1") == 0) {
storage = new hnswlib::PmemLevel0;
}
else {
storage = new hnswlib::VmemLevel0;
}
hnsw = new hnswlib::HierarchicalNSW<Tdist>(createSpace<Tdist, Tcorr>(d, metric),
storage, file);
}
~IndexHNSWlibImpl() {
delete hnsw;
}
void setEFConstruction(size_t ef) override {
hnsw->setEfConstruction(ef);
}
void setEFSearch(size_t ef) override {
hnsw->setEfSearch(ef);
}
size_t getEfConstruction() override {
return hnsw->getEfConstruction();
}
size_t getEfSearch() override {
return hnsw->getEfSearch();
}
void save(FILE* file) const override {
hnsw->saveIndex(file);
}
void add(idx_t n, const float* x) override {
add_with_ids(n, x, nullptr);
}
void add_with_ids(idx_t n, const float* x, const idx_t* xids) override {
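// Grow the index capacity geometrically (factor 1.5) until the whole
// batch fits, then resize once.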
size_t max_elements = hnsw->getMaxElement();
bool need_resize = false;
while(ntotal + n > max_elements) {
need_resize = true;
max_elements = size_t(max_elements * 1.5);
}
if(need_resize) {
hnsw->resizeIndex(max_elements);
}
FAISS_ASSERT (ntotal + n <= hnsw->getMaxElement());
#pragma omp parallel for
for(idx_t i = 0; i < n; i++) {
const float* xi = x + d * i;
const Tcorr* txi = convertVector(d, xi, Tcorr());
hnsw->addPoint(txi, xids ? xids[i] : ntotal + i);
deleteConvertedVector(txi);
}
ntotal += n;
if(verbose) {
printf("%lu vectors newly added, now %lu totally\n", n, ntotal);
}
}
void reset() override {
size_t M = hnsw->getM();
size_t ef_construction = hnsw->getEfConstruction();
size_t ef_search = hnsw->getEfSearch();
delete hnsw;
hnsw = new hnswlib::HierarchicalNSW<Tdist>(createSpace<Tdist, Tcorr>(d, metric_type),
new hnswlib::VmemLevel0, INIT_MAX_ELEMENTS, M, ef_construction);
hnsw->setEfSearch(ef_search);
ntotal = 0;
}
void reconstruct (idx_t key, float* recons) const override{
Tcorr* temp = (Tcorr*)(hnsw->getDataByInternalId(key));
deconvertVector(d, temp, recons);
}
void search(idx_t n, const float* x,
idx_t k, float* distances, idx_t* labels) const override {
#pragma omp parallel for
for(idx_t i = 0; i < n; i++) {
const float* xi = x + i * d;
const Tcorr* txi = convertVector(d, xi, Tcorr());
auto topk = hnsw->searchKnn(txi, k);
deleteConvertedVector(txi);
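// searchKnn returns a max-heap, so the k results are written back to
// front to yield ascending distance order.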
float* distances_i = distances + (i + 1) * k - 1;
idx_t* labels_i = labels + (i + 1) * k - 1;
for(idx_t j = 0; j < k; j++) {
auto& entry = topk.top();
*distances_i = float(entry.first);
*labels_i = entry.second;
distances_i--;
labels_i--;
topk.pop();
}
}
}
};
using IndexHNSWlibFp32 = IndexHNSWlibImpl<float, float>;
using IndexHNSWlibBfp16 = IndexHNSWlibImpl<float, bfp16_t>;
using IndexHNSWlibInt16 = IndexHNSWlibImpl<int64_t, int16_t>;
using IndexHNSWlibInt8 = IndexHNSWlibImpl<int, int8_t>;
}
|
tzvjsvd.c | #include "aalloc.h"
#include "mtxio.h"
#include "pjs.h"
#include "timer.h"
#include "zmerge.h"
#include "zsplit.h"
#include "zvjsvd.h"
int main(int argc, char *argv[])
{
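/* Test driver (inferred from the helper names): reads a complex m x n
matrix G, splits it into real and imaginary parts, runs the complex
one-sided Jacobi SVD (zvjsvd_) with strategy J, and writes out the
singular values (S*), the right singular vectors (V*), and the
transformed G holding the left vectors (U*), timing each phase via the
TSC. */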
(void)set_cbwr();
if (argc != 5) {
(void)fprintf(stderr, "%s J M N BaseName\n", *argv);
return 1;
}
const fnat m = (fnat)atoz(argv[2u]);
if (!m)
return 3;
fnat ldG = m, ldGr = m, ldGi = m;
const fnat n = (fnat)atoz(argv[3u]);
if (!n)
return 4;
if (n > m)
return 4;
if (n & 1u)
return 4;
if ((n >> 1u) & VDL_1)
return 4;
fnat ldV = n, ldVr = n, ldVi = n;
const long j = atol(argv[1u]);
switch (j) {
case PJS_ME:
case PJS_MM:
break;
default:
return 2;
}
unsigned stp = 0u;
const unsigned *const js = pjs(j, (unsigned)n, &stp);
if (!js)
return 2;
const char *const bn = argv[4u];
if (!*bn)
return 5;
const int gd = open_ro_(bn, "G");
if (gd < 0)
return 5;
double complex *G = (double complex*)NULL;
double *Gr = (double*)NULL;
double *Gi = (double*)NULL;
if (zalloc2_(&m, &n, &G, &ldG, &Gr, &ldGr, &Gi, &ldGi) < 0)
return 6;
if (zread2_(&m, &n, G, &ldG, &gd))
return 5;
if (close(gd))
return 5;
unsigned rd[2u] = { 0u, 0u };
const uint64_t hz = tsc_get_freq_hz_(rd);
uint64_t b = rdtsc_beg(rd);
if (zsplit_(&m, &n, G, &ldG, Gr, &ldGr, Gi, &ldGi) < 0)
return 6;
uint64_t e = rdtsc_end(rd);
const long double ts = tsc_lap(hz, b, e);
double complex *V = (double complex*)NULL;
double *Vr = (double*)NULL;
double *Vi = (double*)NULL;
if (zalloc2_(&n, &n, &V, &ldV, &Vr, &ldVr, &Vi, &ldVi) < 0)
return 7;
double *const w = (double*)aligned_alloc(VA, (7u * (n * sizeof(double))));
if (!w)
return 8;
double *const eS = w;
double *const fS = eS + n;
double *const work = fS + n;
wide *const ws = (wide*)work;
#ifdef JTRACE
(void)sprintf((char*)work, "%s.%ld", bn,
#ifdef _OPENMP
j
#else /* !_OPENMP */
(j - 1l)
#endif /* ?_OPENMP */
);
#endif /* JTRACE */
unsigned *const iwork = (unsigned*)aligned_alloc(VA, ((n >> VDLlg) * sizeof(unsigned)));
if (!iwork)
return 9;
const unsigned swp = 999u;
b = rdtsc_beg(rd);
const fint o = zvjsvd_(&m, &n, Gr, &ldGr, Gi, &ldGi, Vr, &ldVr, Vi, &ldVi, eS, fS, js, &stp, &swp, work, iwork);
e = rdtsc_end(rd);
const long double tj = tsc_lap(hz, b, e);
free(iwork);
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(eS,fS,ws,n)
#endif /* _OPENMP */
for (fnat i = 0u; i < n; ++i) {
if (eS[i] != 0.0)
ws[i] = scalbw(fS[i], eS[i]);
else // 2^0 == 1
ws[i] = fS[i];
}
const int sd = open_wo_(bn,
#ifdef _OPENMP
((j == PJS_ME) ? "S2" : "S4")
#else /* !_OPENMP */
((j == PJS_ME) ? "S1" : "S3")
#endif /* ?_OPENMP */
);
if (sd < 0)
return 10;
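/* js is no longer needed as a step table at this point, so its storage is
reused (casting away const) to hold the file-size argument for
resizef_. */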
*(size_t*)js = (n * sizeof(wide));
if (resizef_(&sd, (const size_t*)js))
return 10;
const fnat n2 = (n << 1u), n1 = 1u;
if (dwrite2_(&n2, &n1, work, &n2, &sd))
return 10;
if (close(sd))
return 10;
free(w);
b = rdtsc_beg(rd);
if (zmerge_(&n, &n, Vr, &ldVr, Vi, &ldVi, V, &ldV) < 0)
return 11;
e = rdtsc_end(rd);
const long double tv = tsc_lap(hz, b, e);
free(Vi);
free(Vr);
const int vd = open_wo_(bn,
#ifdef _OPENMP
((j == PJS_ME) ? "V2" : "V4")
#else /* !_OPENMP */
((j == PJS_ME) ? "V1" : "V3")
#endif /* ?_OPENMP */
);
if (vd < 0)
return 12;
*(size_t*)js = (n * (n * sizeof(double complex)));
if (resizef_(&vd, (const size_t*)js))
return 12;
if (zwrite2_(&n, &n, V, &ldV, &vd))
return 12;
if (close(vd))
return 12;
free(V);
b = rdtsc_beg(rd);
if (zmerge_(&m, &n, Gr, &ldGr, Gi, &ldGi, G, &ldG) < 0)
return 13;
e = rdtsc_end(rd);
const long double tg = tsc_lap(hz, b, e);
free(Gi);
free(Gr);
const int ud = open_wo_(bn,
#ifdef _OPENMP
((j == PJS_ME) ? "U2" : "U4")
#else /* !_OPENMP */
((j == PJS_ME) ? "U1" : "U3")
#endif /* ?_OPENMP */
);
if (ud < 0)
return 14;
*(size_t*)js = (m * (n * sizeof(double complex)));
if (resizef_(&ud, (const size_t*)js))
return 14;
if (zwrite2_(&m, &n, G, &ldG, &ud))
return 14;
if (close(ud))
return 14;
free(G);
(void)fprintf(stdout, "\"%s\",%1ld,%4llu,%4llu,%15.9Lf,%15.9Lf,%3lld,%15.9Lf,%15.9Lf\n", bn,
#ifdef _OPENMP
j
#else /* !_OPENMP */
(j - 1l)
#endif /* ?_OPENMP */
, (unsigned long long)m, (unsigned long long)n, ts, tj, (long long)o, tv, tg);
(void)fflush(stdout);
free((void*)js);
return EXIT_SUCCESS;
}
|
GB_binop.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02)
// A.*B function (eWiseMult): GB (_AemultB_03)
// A.*B function (eWiseMult): GB (_AemultB_bitmap)
// A*D function (colscale): GB (_AxD)
// D*A function (rowscale): GB (_DxB)
// C+=B function (dense accum): GB (_Cdense_accumB)
// C+=b function (dense accum): GB (_Cdense_accumb)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum)
// C=scalar+B GB (_bind1st)
// C=scalar+B' GB (_bind1st_tran)
// C=A+scalar GB (_bind2nd)
// C=A'+scalar GB (_bind2nd_tran)
// C type: GB_ctype
// A type: GB_atype
// B,b type: GB_btype
// BinaryOp: GB_binaryop(cij,aij,bij,i,j)
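// As an illustrative sketch (not copied from a generated file): in the
// instance for the PLUS operator on double (GB_binop__plus_fp64.c), the
// placeholders above expand roughly to
//      GB_atype    -> double
//      GB_btype    -> double
//      GB_ctype    -> double
//      GB_binaryop(z,x,y,i,j) -> z = (x) + (y)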
#define GB_ATYPE \
GB_atype
#define GB_BTYPE \
GB_btype
#define GB_CTYPE \
GB_ctype
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
GB_atype_is_btype
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
GB_ctype_is_atype
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
GB_ctype_is_btype
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GB_geta(aij,Ax,pA)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GB_getb(bij,Bx,pB)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GB_ctype t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
GB_copy_a_to_c(cij,Ax,pA)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
GB_copy_b_to_c(cij,Bx,pB)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
GB_binaryop(z, x, y, i, j) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
GB_binaryop_flip
// op is second
#define GB_OP_IS_SECOND \
GB_op_is_second
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
GB_disable
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
if_is_binop_subset
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
endif_is_binop_subset
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
if_C_dense_update
{
#include "GB_dense_subassign_23_template.c"
}
endif_C_dense_update
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
if_C_dense_update
{
// get the scalar b for C += b, of type GB_btype
GB_btype bwork = (*((GB_btype *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
endif_C_dense_update
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
if_binop_is_semiring_multiplier
GrB_Info GB (_AxD)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *restrict Cx = (GB_ctype *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_is_semiring_multiplier
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
if_binop_is_semiring_multiplier
GrB_Info GB (_DxB)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *restrict Cx = (GB_ctype *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_is_semiring_multiplier
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
if_binop_bind1st_is_enabled
GrB_Info GB (_bind1st)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *Cx = (GB_ctype *) Cx_output ;
GB_atype x = (*((GB_atype *) x_input)) ;
GB_btype *Bx = (GB_btype *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GB_getb(bij, Bx, p) ;
GB_binaryop(Cx [p], x, bij, 0, 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind1st_is_enabled
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
if_binop_bind2nd_is_enabled
GrB_Info GB (_bind2nd)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GB_ctype *Cx = (GB_ctype *) Cx_output ;
GB_atype *Ax = (GB_atype *) Ax_input ;
GB_btype y = (*((GB_btype *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GB_geta(aij, Ax, p) ;
GB_binaryop(Cx [p], aij, y, 0, 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind2nd_is_enabled
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
if_binop_bind1st_is_enabled
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GB_getb(aij, Ax, pA) ; \
GB_binaryop(Cx [pC], x, aij, 0, 0) ; \
}
GrB_Info GB (_bind1st_tran)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GB_btype
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_atype x = (*((const GB_atype *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GB_atype
}
endif_binop_bind1st_is_enabled
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
if_binop_bind2nd_is_enabled
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GB_geta(aij, Ax, pA) ; \
GB_binaryop(Cx [pC], aij, y, 0, 0) ; \
}
GrB_Info GB (_bind2nd_tran)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_btype y = (*((const GB_btype *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind2nd_is_enabled
#endif
|
Searching.202008062049.computation_quota.sections.h | //
// Created by Zhen Peng on 8/6/2020.
//
#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H
#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
#include <algorithm>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"
namespace PANNS {
class Searching {
//private:
public:
idi num_v_ = 0;
edgei num_e_ = 0;
idi num_queries_ = 0;
uint64_t dimension_ = 0;
idi width_ = 0; // NSG largest degree
idi ep_ = 0; // Start point
// std::vector<dataf> data_load_;
// std::vector<dataf> queries_load_;
// std::vector< std::vector<dataf> > data_load_;
// std::vector< std::vector<dataf> > queries_load_;
// std::vector<distf> norms_;
dataf *data_load_ = nullptr;
dataf *queries_load_ = nullptr;
// dataf *norms_;
// std::vector< std::vector<idi> > nsg_graph_;
// idi *nsg_graph_indices_;
// idi *nsg_graph_out_edges_;
// std::vector< std::vector<idi> > edge_list_;
char *opt_nsg_graph_ = nullptr;
uint64_t data_bytes_;
uint64_t neighbor_bytes_;
uint64_t vertex_bytes_;
// For multithreads
int num_threads_ = 1;
// int num_real_threads_ = 1;
// int num_threads_intra_query_ = 1;
// int num_threads_inter_query_ = 1;
uint64_t thread_compuation_quota_ = 0;
std::vector<uint64_t> threads_computations_;
dataf compute_norm(
const dataf *data) const;
// idi vertex_id);
// const std::vector<PANNS::dataf> &data);
// size_t loc_start,
// idi dimension)
dataf compute_distance_with_norm(
const dataf *v_data,
const dataf *q_data,
// idi vertex_id,
// idi query_id,
// const std::vector<dataf> &d_data,
// const std::vector<dataf> &q_data,
// PANNS::idi d_start,
// PANNS::idi q_start,
const dataf vertex_norm) const;
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
const idi queue_start,
idi &queue_size,
const idi queue_capacity,
const PANNS::Candidate &cand);
static void add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_top, // The number of elements in queue, independent with queue_start
const idi queue_size); // The maximum capacity of queue, independent with queue_start.
static void insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue_base,
const idi insert_index,
const idi queue_start,
const idi queue_size);
static idi merge_two_queues_into_1st_queue_seq_fixed(
std::vector<Candidate> &queue1,
const idi queue1_start,
const idi queue1_size,
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
static idi merge_two_queues_into_1st_queue_seq_incr(
std::vector<Candidate> &queue1,
const idi queue1_start,
idi &queue1_size, // The number of element in queue1, independent with queue1_start.
const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
idi merge_all_queues_para_array(
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
idi merge_queues_of_four(
std::vector<Candidate> &set_L,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
const idi group_id,
const idi local_queue_capacity,
const idi master_queue_capacity);
idi merge_all_queues_to_master(
std::vector<Candidate> &set_L,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
const idi local_queue_capacity,
const idi local_master_queue_capacity,
const idi master_queue_capacity,
const idi group_size);
idi master_top_m_to_groups(
std::vector<Candidate> &set_L,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
std::vector<idi> &top_m_candidates,
const std::vector<idi> &top_m_candidates_starts,
std::vector<idi> &top_m_candidates_sizes,
const idi k_uc,
idi &last_k,
const idi M,
const idi num_groups);
// const idi group_size);
public:
// For Profiling
// L3CacheMissRate cache_miss_kernel;
uint64_t count_distance_computation_ = 0;
uint64_t count_full_merge_ = 0;
// uint64_t count_add_to_queue_ = 0;
// uint64_t count_single_query_computation_ = 0;
// distf dist_min_ = 0;
// distf dist_max_ = 0;
// double time_merge_ = 0;
double time_gather_ = 0;
double time_move_top_m_ = 0;
double time_full_merge_ = 0;
// double time_select_ = 0;
// double time_select_L_ = 0.0;
// double time_select_M_ = 0.0;
// double time_initialization_ = 0;
// double time_sequential_phase_ = 0;
// double time_parallel_phase_ = 0;
// double time_ending_ = 0.0;
// double time_assign_s_ = 0.0;
// double time_expand_ = 0.0;
// double time_pick_top_m_ = 0.0;
// double time_distance_computation_ = 0.0;
// double time_add_to_queue_ = 0.0;
// double time_insert_ = 0;
// double time_compare_minimum_ = 0;
// double time_memmove_ = 0;
// std::vector<double> time_memmove_list_;
// L3CacheMissRate profile_miss_rate;
// uint64_t number_local_elements_ = 0;
// std::vector<idi> L_ids_;
// std::vector<idi> M_ids_;
~Searching()
{
free(data_load_);
data_load_ = nullptr;
// free(queries_load_);
// _mm_free(data_load_);
free(queries_load_);
queries_load_ = nullptr;
// free(norms_);
// free(nsg_graph_indices_);
// free(nsg_graph_out_edges_);
free(opt_nsg_graph_);
opt_nsg_graph_ = nullptr;
}
void load_data_load(char *filename);
void load_queries_load(char *filename);
void load_nsg_graph(char *filename);
// void build_opt_graph();
void prepare_init_ids(
std::vector<unsigned> &init_ids,
const unsigned L) const;
void subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation);
void subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation);
void seq_search_with_top_m_double_m(
const idi M_max,
const idi query_id,
const idi K,
const idi global_L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited);
idi expand_one_candidate(
idi cand_id,
const dataf *query_data,
const distf &dist_bound,
std::vector<Candidate> &set_L,
const idi local_queue_start,
idi &local_queue_size,
const idi &local_queue_capacity,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_computation);
void para_search_with_top_m_hierarchy_merge_v1(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_capacity, // Maximum size of local queue
const idi local_master_queue_capacity,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
// std::vector< std::vector<idi> > &top_m_candidates_list, // every group has one top-M queue
std::vector<idi> &top_m_candidate,
const std::vector<idi> &top_m_candidates_starts,
std::vector<idi> &top_m_candidates_sizes,
boost::dynamic_bitset<> &is_visited,
const idi group_size, // Should be 4
const idi full_merge_freq);
void group_search_for_one_iteration(
const idi g_i,
const dataf *query_data,
const idi M_group,
const idi L,
std::vector<Candidate> &set_L,
const idi local_queue_capacity, // Maximum size of local queue
const idi local_master_queue_capacity, // Maximum size of local master queue
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
std::vector<idi> &top_m_candidates,
const idi top_m_candidates_start,
idi &top_m_candidates_size,
boost::dynamic_bitset<> &is_visited,
idi &k_uc,
idi &last_k,
idi &nk,
const idi para_iter,
const idi group_size, // Should be 4
const idi num_groups,
const idi full_merge_freq,
bool &is_finished,
uint64_t &group_distance_computation);
void para_search_with_top_m_hierarchy_merge_v2(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_capacity, // Maximum size of local queue
const idi local_master_queue_capacity, // Maximum size of local master queue
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
std::vector<idi> &top_m_candidates,
const std::vector<idi> &top_m_candidates_starts,
std::vector<idi> &top_m_candidates_sizes,
boost::dynamic_bitset<> &is_visited,
const idi group_size, // Should be 4
const idi full_merge_freq);
void para_search_with_top_m_less_sync_v0(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_capacity, // Maximum size of local queue
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
const idi full_merge_freq,
const idi local_iter_bound);
void load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list);
void get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching
/**
* Input the data from the file.
* @param filename
*/
inline void Searching::load_data_load(char *filename)
{
auto old_d = dimension_;
DiskIO::load_data(
filename,
data_load_,
num_v_,
dimension_);
if (old_d) {
if (old_d != dimension_) {
std::cerr << "Error: data dimension " << dimension_
<< " is not equal to query dimension " << old_d << "." << std::endl;
exit(EXIT_FAILURE);
}
}
}
/**
* Input queries from the file.
* @param filename
*/
inline void Searching::load_queries_load(char *filename)
{
auto old_d = dimension_;
DiskIO::load_data(
filename,
queries_load_,
num_queries_,
dimension_);
if (old_d) {
if (old_d != dimension_) {
std::cerr << "Error: query dimension " << dimension_
<< " is not equal to data dimension " << old_d << "." << std::endl;
exit(EXIT_FAILURE);
}
}
}
/**
* Input the NSG graph from the file.
* Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
* @param filename
*/
inline void Searching::load_nsg_graph(char *filename)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
exit(EXIT_FAILURE);
}
fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));
data_bytes_ = (1 + dimension_) * sizeof(dataf);
neighbor_bytes_ = (1 + width_) * sizeof(idi);
vertex_bytes_ = data_bytes_ + neighbor_bytes_;
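// Each vertex record in opt_nsg_graph_ is laid out as
// [norm (distf)][data (dimension_ * dataf)][degree (idi)][neighbor ids (width_ * idi)]
// so one sequential read fetches a vertex's norm, data, and adjacency together.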
opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
if (!opt_nsg_graph_) {
std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl;
exit(EXIT_FAILURE);
}
idi v_id = 0;
num_e_ = 0;
char *base_location = opt_nsg_graph_;
while (true) {
idi degree;
fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
if (fin.eof()) {
break;
}
num_e_ += degree;
// std::vector<idi> tmp_ngbrs(degree);
// fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned));
// Norm and data
distf norm = compute_norm(data_load_ + v_id * dimension_);
// distf norm = compute_norm(v_id);
std::memcpy(base_location, &norm, sizeof(distf)); // Norm
memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
base_location += data_bytes_;
// Neighbors
memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors
fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
// memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned));
base_location += neighbor_bytes_;
++v_id;
}
if (v_id != num_v_) {
std::cerr << "Error: NSG data has " << v_id
<< " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
exit(EXIT_FAILURE);
}
free(data_load_);
data_load_ = nullptr;
// ////////////////////////
// idi v_id = 0;
// num_e_ = 0;
// while (true) {
// idi degree;
// fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
// if (fin.eof()) {
// break;
// }
// num_e_ += degree;
//
// std::vector<idi> ngbrs(degree);
// fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned));
//// nsg_graph_.push_back(ngbrs);
//// tmp_edge_list.push_back(ngbrs);
// edge_list_.push_back(ngbrs);
// ++v_id;
// }
// if (v_id != num_v_) {
// std::cerr << "Error: NSG data has " << v_id
// << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
// exit(EXIT_FAILURE);
// }
}
/**
* Load those true top-K neighbors (ground truth) of queries
* @param filename
* @param[out] true_nn_list
*/
inline void Searching::load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list)
// unsigned &t_K)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
fprintf(stderr, "Error: cannot open file %s\n", filename);
exit(EXIT_FAILURE);
}
idi t_query_num;
idi t_K;
// unsigned t_K;
fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
// if (t_query_num != query_num) {
// fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
// query_num, t_query_num, filename);
// exit(EXIT_FAILURE);
// }
if (t_query_num < num_queries_) {
fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
exit(EXIT_FAILURE);
}
if (t_K < 100) {
fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
exit(EXIT_FAILURE);
}
// data = new unsigned[(size_t) t_query_num * (size_t) t_K];
true_nn_list.resize(t_query_num);
for (idi q_i = 0; q_i < t_query_num; ++q_i) {
true_nn_list[q_i].resize(t_K);
}
for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned n_i = 0; n_i < t_K; ++n_i) {
unsigned id;
float dist;
fin.read(reinterpret_cast<char *>(&id), sizeof(id));
fin.read(reinterpret_cast<char *>(&dist), sizeof(dist));
// data[offset + n_i] = id;
true_nn_list[q_i][n_i] = id;
}
}
fin.close();
}
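/*
 * Note: recalls[k] below counts how many of each query's returned top-k ids
 * appear anywhere in that query's true top-100 list, then divides by
 * k * num_queries, i.e. recall@k is measured against the true top-100
 * rather than the true top-k.
 */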
inline void Searching::get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const
{
// if (t_K < 100) {
// fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
// exit(EXIT_FAILURE);
// }
if (true_nn_list[0].size() < 100) {
fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
exit(EXIT_FAILURE);
}
recalls[1] = 0.0;
recalls[5] = 0.0;
recalls[10] = 0.0;
recalls[20] = 0.0;
recalls[50] = 0.0;
recalls[100] = 0.0;
for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned top_i = 0; top_i < 100; ++top_i) {
unsigned true_id = true_nn_list[q_i][top_i];
for (unsigned n_i = 0; n_i < 100; ++n_i) {
if (set_K_list[q_i][n_i] == true_id) {
if (n_i < 1) recalls[1] += 1;
if (n_i < 5) recalls[5] += 1;
if (n_i < 10) recalls[10] += 1;
if (n_i < 20) recalls[20] += 1;
if (n_i < 50) recalls[50] += 1;
if (n_i < 100) recalls[100] += 1;
}
}
}
}
recalls[1] /= 1.0 * num_queries_;
recalls[5] /= 5.0 * num_queries_;
recalls[10] /= 10.0 * num_queries_;
recalls[20] /= 20.0 * num_queries_;
recalls[50] /= 50.0 * num_queries_;
recalls[100] /= 100.0 * num_queries_;
}
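// A minimal usage sketch (hypothetical driver code; set_K_list is assumed
// to hold each query's returned top candidates):
// std::vector< std::vector<PANNS::idi> > true_nn_list;
// searching.load_true_NN("ground_truth.bin", true_nn_list);
// std::unordered_map<unsigned, double> recalls;
// searching.get_recall_for_all_queries(true_nn_list, set_K_list, recalls);
// printf("recall@100: %f\n", recalls[100]);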
/**
* Prepare init_ids and flags, as they are constant for all queries.
* @param[out] init_ids
* @param L
*/
inline void Searching::prepare_init_ids(
std::vector<unsigned int> &init_ids,
const unsigned L) const
{
// idi num_ngbrs = get_out_degree(ep_);
// edgei edge_start = nsg_graph_indices_[ep_];
// // Store ep_'s neighbors as candidates
// idi tmp_l = 0;
// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
// }
// std::unordered_set<idi> visited_ids;
boost::dynamic_bitset<> is_selected(num_v_);
idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
idi init_ids_end = 0;
// for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
// idi v_id = out_edges[tmp_l];
idi v_id = out_edges[e_i];
if(is_selected[v_id]) {
continue;
}
is_selected[v_id] = true;
// init_ids[tmp_l] = v_id;
init_ids[init_ids_end++] = v_id;
// init_ids[tmp_l] = out_edges[tmp_l];
// visited_ids.insert(init_ids[tmp_l]);
}
// for (idi i = 0; i < tmp_l; ++i) {
// is_visited[init_ids[i]] = true;
// }
// If ep_'s neighbors are not enough, add other random vertices
idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
while (init_ids_end < L) {
tmp_id %= num_v_;
idi v_id = tmp_id++;
if (is_selected[v_id]) {
continue;
}
// if (visited_ids.find(id) != visited_ids.end()) {
// continue;
// }
is_selected[v_id] = true;
// visited_ids.insert(id);
init_ids[init_ids_end++] = v_id;
// tmp_l++;
}
}
// TODO: re-code in AVX-512
inline dataf Searching::compute_norm(
const dataf *data) const
// idi vertex_id)
// const std::vector<PANNS::dataf> &data)
// size_t loc_start,
// idi dimension)
{
// const dataf *a = data.data() + loc_start;
// const dataf *a = data_load_ + vertex_id * dimension_;
// idi size = dimension_;
dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
// tmp = _mm256_load_ps(addr); \
// tmp = _mm256_mul_ps(tmp, tmp); \
// dest = _mm256_add_ps(dest, tmp);
#define AVX_L2NORM(addr, dest, tmp) \
tmp = _mm256_loadu_ps(addr); \
tmp = _mm256_mul_ps(tmp, tmp); \
dest = _mm256_add_ps(dest, tmp);
__m256 sum;
__m256 l0, l1;
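// D rounds dimension_ up to a multiple of 8 (one AVX register of floats).
// DD is the largest multiple of 16 below D and is processed two registers
// per step; DR (0 or 8) is the leftover register handled before the loop.
// Note this reads up to D floats, so the buffer is assumed to be allocated
// (or padded) to a multiple of 8 floats.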
unsigned D = (dimension_ + 7) & ~7U;
unsigned DR = D % 16;
unsigned DD = D - DR;
const float *l = data;
const float *e_l = l + DD;
float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
sum = _mm256_load_ps(unpack);
// sum = _mm256_loadu_ps(unpack);
if (DR) { AVX_L2NORM(e_l, sum, l0); }
for (unsigned i = 0; i < DD; i += 16, l += 16) {
AVX_L2NORM(l, sum, l0);
AVX_L2NORM(l + 8, sum, l1);
}
_mm256_store_ps(unpack, sum);
// _mm256_storeu_ps(unpack, sum);
result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
return result;
}
inline dataf Searching::compute_distance_with_norm(
const dataf *v_data,
const dataf *q_data,
// idi vertex_id,
// idi query_id,
// const std::vector<PANNS::dataf> &d_data,
// const std::vector<PANNS::dataf> &q_data,
// PANNS::idi d_start,
// PANNS::idi q_start,
const dataf vertex_norm) const
// idi dimension)
{
// idi size = dimension_;
float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
// tmp1 = _mm256_load_ps(addr1);\
// tmp2 = _mm256_load_ps(addr2);\
// tmp1 = _mm256_mul_ps(tmp1, tmp2); \
// dest = _mm256_add_ps(dest, tmp1);
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
tmp1 = _mm256_loadu_ps(addr1);\
tmp2 = _mm256_loadu_ps(addr2);\
tmp1 = _mm256_mul_ps(tmp1, tmp2); \
dest = _mm256_add_ps(dest, tmp1);
__m256 sum;
__m256 l0, l1;
__m256 r0, r1;
unsigned D = (dimension_ + 7) & ~7U;
unsigned DR = D % 16;
unsigned DD = D - DR;
const float *l = v_data;
const float *r = q_data;
// const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
// const float *r = queries_load_ + query_id * dimension_;
const float *e_l = l + DD;
const float *e_r = r + DD;
float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
sum = _mm256_load_ps(unpack);
// sum = _mm256_loadu_ps(unpack);
if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); }
for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
AVX_DOT(l, r, sum, l0, r0);
AVX_DOT(l + 8, r + 8, sum, l1, r1);
}
_mm256_store_ps(unpack, sum);
// _mm256_storeu_ps(unpack, sum);
result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
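// Return ||v||^2 - 2*v.q instead of the full squared Euclidean distance
// ||v||^2 - 2*v.q + ||q||^2: the query's own squared norm is constant per
// query, so dropping it preserves the distance ordering of all candidates.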
result = -2 * result + vertex_norm;
return result;
}
//
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
// add_into_queue with a queue_start
inline idi Searching::add_into_queue(
std::vector<PANNS::Candidate> &queue,
const idi queue_start,
idi &queue_size, // The insertion location starting from queue_start
const idi queue_capacity, // The maximum capacity of queue, independent with queue_start.
const PANNS::Candidate &cand)
{
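// Returns the insertion position relative to queue_start, or
// queue_capacity when the candidate is a duplicate, or when the queue is
// full and the candidate falls beyond its end.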
if (0 == queue_size) {
queue[queue_start + queue_size++] = cand;
return 0;
}
idi queue_end = queue_start + queue_size;
// Find the insert location
const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand);
// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_size, cand);
idi insert_loc = it_loc - queue.begin();
if (insert_loc != queue_end) {
if (cand.id_ == it_loc->id_) {
// Duplicate
return queue_capacity;
}
if (queue_size >= queue_capacity) { // Queue is full
--queue_size;
--queue_end;
}
} else { // insert_loc == queue_end, insert at the end?
if (queue_size < queue_capacity) { // Queue is not full
// Insert at the end
queue[insert_loc] = cand;
++queue_size;
return queue_size - 1;
} else { // Queue is full
return queue_capacity;
}
}
// Add into queue
memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
reinterpret_cast<char *>(queue.data() + insert_loc),
(queue_end - insert_loc) * sizeof(Candidate));
queue[insert_loc] = cand;
++queue_size;
return insert_loc - queue_start;
}
inline void Searching::add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_size, // The number of elements in queue, independent with queue_start
const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
const idi dest_index = queue_start + insert_index;
if (queue_size == queue_length) {
--queue_size;
}
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index) * sizeof(Candidate));
queue[dest_index] = cand;
++queue_size;
}
inline void Searching::insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index,
const idi queue_start,
const idi queue_size)
{
const idi dest_index = queue_start + insert_index;
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index - 1) * sizeof(Candidate));
queue[dest_index] = cand;
// memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
// reinterpret_cast<char *>(queue_base + dest_index),
// (queue_size - insert_index - 1) * sizeof(T));
// for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
// queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
// }
// queue_base[dest_index] = cand;
}
/* Function:
* Merge queue2 into queue1. queue1_size stays fixed: elements pushed past
* queue1's end are dropped.
*/
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
std::vector<Candidate> &queue1,
const idi queue1_start,
const idi queue1_size,
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size)
// const idi limit_size)
{
assert(queue1_size && queue2_size);
// Record the lowest insert location.
auto it_loc = std::lower_bound(
queue1.begin() + queue1_start,
queue1.begin() + queue1_start + queue1_size,
queue2[queue2_start]);
idi insert_index = it_loc - (queue1.begin() + queue1_start);
if (insert_index == queue1_size) {
return insert_index;
} else if (insert_index == queue1_size - 1) {
queue1[queue1_start + insert_index] = queue2[queue2_start];
return insert_index;
}
// Insert the 1st of queue2
if (queue2[queue2_start].id_ != it_loc->id_) {
// Not Duplicate
insert_one_element_at(
queue2[queue2_start],
queue1,
insert_index,
queue1_start,
queue1_size);
}
if (queue2_size == 1) {
return insert_index;
}
// Insert
idi q_i_1 = insert_index + 1 + queue1_start;
idi q_i_2 = queue2_start + 1;
const idi q_i_1_bound = queue1_start + queue1_size;
const idi q_i_2_bound = queue2_start + queue2_size;
// const idi insert_i_bound = queue1_start + limit_size;
for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
// queue1 or queue2 has been fully traversed; the rest needs no merging.
break;
} else if (queue1[q_i_1] < queue2[q_i_2]) {
++q_i_1;
} else if (queue2[q_i_2] < queue1[q_i_1]) {
// Insert queue2[q_i_2] into queue1
insert_one_element_at(
queue2[q_i_2++],
queue1,
insert_i,
queue1_start,
queue1_size);
++q_i_1;
} else {
// Duplicate
++q_i_2;
++q_i_1;
}
}
return insert_index;
}
/* Function:
* Merge queue2 into queue1. queue1_size is updated as elements are
* inserted; queue1_length (the capacity) should be provided.
*/
inline idi Searching::merge_two_queues_into_1st_queue_seq_incr(
std::vector<Candidate> &queue1,
const idi queue1_start,
idi &queue1_size, // The number of element in queue1, independent with queue1_start.
const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size)
// const idi limit_size)
{
assert(queue1_size && queue2_size);
// Record the lowest insert location.
auto it_loc = std::lower_bound(
queue1.begin() + queue1_start,
queue1.begin() + queue1_start + queue1_size,
queue2[queue2_start]);
idi insert_index = it_loc - (queue1.begin() + queue1_start);
if (insert_index == queue1_size) {
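// Every element of queue2 is >= the current tail of queue1, so just
// append as many of them as queue1's remaining capacity allows.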
idi copy_count = (queue1_size + queue2_size > queue1_length) ?
queue1_length - queue1_size :
queue2_size;
memmove(queue1.data() + queue1_start + queue1_size,
queue2.data() + queue2_start,
copy_count * sizeof(Candidate));
queue1_size += copy_count;
return insert_index;
}
if (queue2[queue2_start].id_ != it_loc->id_) {
// Not Duplicate
add_into_queue_at(
queue2[queue2_start],
queue1,
insert_index,
queue1_start,
queue1_size,
queue1_length);
}
if (queue2_size == 1) {
return insert_index;
}
// Insert
idi q_i_1 = insert_index + 1 + queue1_start;
idi q_i_2 = queue2_start + 1;
idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
const idi q_i_2_bound = queue2_start + queue2_size;
// idi insert_i;
for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
if (q_i_1 >= q_i_1_bound) {
queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
for ( ; insert_i < queue1_size; ++insert_i) {
queue1[queue1_start + insert_i] = queue2[q_i_2++];
}
break;
} else if (q_i_2 >= q_i_2_bound) {
break;
} else if (queue1[q_i_1] < queue2[q_i_2]) {
++q_i_1;
} else if (queue2[q_i_2] < queue1[q_i_1]) {
add_into_queue_at(
queue2[q_i_2++],
queue1,
insert_i,
queue1_start,
queue1_size,
queue1_length);
++q_i_1;
q_i_1_bound = queue1_start + queue1_size;
} else {
// Duplicate
++q_i_2;
++q_i_1;
}
}
return insert_index;
}
/* Function:
* Use the large set_L array as a concatenation of all local queues.
*/
inline idi Searching::merge_all_queues_para_array(
std::vector<Candidate> &set_L,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
const idi L)
{
const int num_queues = num_threads_;
idi nk = L;
int size = 1 << (static_cast<idi>(log2(num_queues)));
idi log2size = static_cast<idi>(log2(size));
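// Binary-tree reduction over the first `size` queues: in round d, queue
// (i + 2^d - 1) is merged into queue (i + 2^(d+1) - 1), so after
// log2(size) rounds everything is folded into the last queue; only the
// final merge into the master queue updates nk. num_threads_ is
// effectively assumed to be a power of two here; the remainder path is
// commented out below.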
for (idi d = 0; d < log2size; ++d) {
uint32_t by = 1 << (d + 1);
#pragma omp parallel for
for (int i = 0; i < size; i += by) {
idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
idi a_start = ai * local_queue_length;
idi bi = i + (1 << d) - 1; // i + 2^d - 1
idi b_start = bi * local_queue_length;
if (0 == local_queues_ends[bi]) {
continue;
}
if (local_queues_ends[ai] == 0) {
std::copy(set_L.begin() + b_start,
set_L.begin() + b_start + local_queues_ends[bi],
set_L.begin() + a_start); // Copy bi to ai
local_queues_ends[ai] = local_queues_ends[bi];
local_queues_ends[bi] = 0;
continue;
}
if (ai != static_cast<idi>(num_queues - 1)) {
merge_two_queues_into_1st_queue_seq_incr(
set_L,
a_start,
local_queues_ends[ai],
local_queue_length,
set_L,
b_start,
local_queues_ends[bi]);
} else {
idi r = merge_two_queues_into_1st_queue_seq_fixed(
set_L,
a_start,
L,
set_L,
b_start,
local_queues_ends[bi]);
if (r < nk) {
nk = r;
}
}
}
}
// // Remain, prefix-sum-like merge
// if (size != num_queues) {
// for (int i = size; i < num_queues; ++i) {
// idi ai = i;
// idi a_start = ai * local_queue_length;
// idi bi = i - 1;
// idi b_start = bi * local_queue_length;
// if (0 == local_queues_ends[bi]) {
// continue;
// }
// if (local_queues_ends[ai] == 0) {
// std::copy(set_L.begin() + b_start,
// set_L.begin() + b_start + local_queues_ends[bi],
// set_L.begin() + a_start); // Copy bi to ai
// local_queues_ends[ai] = local_queues_ends[bi];
// local_queues_ends[bi] = 0;
// continue;
// }
// if (ai != static_cast<idi>(num_queues - 1)) {
// merge_two_queues_into_1st_queue_seq_incr(
// set_L,
// a_start,
// local_queues_ends[ai],
// local_queue_length,
// set_L,
// b_start,
// local_queues_ends[bi]);
// } else {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// a_start,
// L,
// set_L,
// b_start,
// local_queues_ends[bi]);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// Reset local_queues_ends
// Do not do this for the Collector Idea or the Selecting Idea
std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
return nk;
// return r;
}
/*
* Function: merge 4 queues into the last queue
*/
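// Merge pattern within a group of 4 queues (q0..q3, offset by group_start):
// first q0 -> q1 and q2 -> q3 in parallel, then q1 -> q3, leaving the
// group's result in q3, the group-master queue. Returns the lowest
// insertion index observed in the group-master queue.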
inline idi Searching::merge_queues_of_four(
std::vector<Candidate> &set_L,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
const idi group_id,
const idi local_queue_capacity,
const idi master_queue_capacity)
{
// const int num_queues = 4;
const idi group_start = group_id * 4;
idi nk = master_queue_capacity;
#pragma omp parallel for num_threads(2)
for (int i = 0; i < 2; ++i) {
const idi bi = 2 * i + group_start;
const idi ai = bi + 1;
if (!local_queues_sizes[bi]) {
continue;
}
if (!local_queues_sizes[ai]) {
std::copy(
set_L.begin() + local_queues_starts[bi],
set_L.begin() + local_queues_starts[bi] + local_queues_sizes[bi],
set_L.begin() + local_queues_starts[ai]);
local_queues_sizes[ai] = local_queues_sizes[bi];
local_queues_sizes[bi] = 0;
continue;
}
if (ai != 3 + group_start) {
merge_two_queues_into_1st_queue_seq_incr(
set_L,
local_queues_starts[ai],
local_queues_sizes[ai],
local_queue_capacity,
set_L,
local_queues_starts[bi],
local_queues_sizes[bi]);
} else {
idi r = merge_two_queues_into_1st_queue_seq_incr(
set_L,
local_queues_starts[ai],
local_queues_sizes[ai],
master_queue_capacity,
set_L,
local_queues_starts[bi],
local_queues_sizes[bi]);
if (r < nk) {
nk = r;
}
}
local_queues_sizes[bi] = 0;
}
{
const idi bi = 1 + group_start;
const idi ai = 3 + group_start;
if (!local_queues_sizes[bi]) {
return nk;
}
if (!local_queues_sizes[ai]) {
std::copy(
set_L.begin() + local_queues_starts[bi],
set_L.begin() + local_queues_starts[bi] + local_queues_sizes[bi],
set_L.begin() + local_queues_starts[ai]);
local_queues_sizes[ai] = local_queues_sizes[bi];
local_queues_sizes[bi] = 0;
return 0;
}
idi r = merge_two_queues_into_1st_queue_seq_incr(
set_L,
local_queues_starts[ai],
local_queues_sizes[ai],
master_queue_capacity,
set_L,
local_queues_starts[bi],
local_queues_sizes[bi]);
if (r < nk) {
nk = r;
}
local_queues_sizes[bi] = 0;
}
return nk;
}
/*
* Function: used by the hierarchical merging idea.
* Merge all queues into the last queue.
* Difference from merge_all_queues_para_array: here the last queue might not
* hold L elements at the beginning, so use
* merge_two_queues_into_1st_queue_seq_incr(), not merge_two_queues_into_1st_queue_seq_fixed().
*/
inline idi Searching::merge_all_queues_to_master(
std::vector<Candidate> &set_L,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
const idi local_queue_capacity,
const idi local_master_queue_capacity,
const idi master_queue_capacity,
const idi group_size)
{
const idi num_queues = num_threads_;
idi nk = master_queue_capacity;
int size = num_queues;
// int size = 1 << (static_cast<idi>(log2(num_queues)));
idi log2size = static_cast<idi>(log2(size));
idi num_t = num_queues >> 1;
for (idi d = 0; d < log2size; ++d) {
uint32_t by = 1 << (d + 1);
//#pragma omp parallel for num_threads(2)
//#pragma omp parallel for
for (int i = 0; i < size; i += by) {
idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
// idi a_start = ai * local_queue_capacity;
idi a_start = local_queues_starts[ai];
idi bi = i + (1 << d) - 1; // i + 2^d - 1
// idi b_start = bi * local_queue_capacity;
idi b_start = local_queues_starts[bi];
if (0 == local_queues_sizes[bi]) {
continue;
}
{//test
printf("local_queues_sizes[%u]: %u\n",
bi, local_queues_sizes[bi]);
}
if (local_queues_sizes[ai] == 0) {
std::copy(set_L.begin() + b_start,
set_L.begin() + b_start + local_queues_sizes[bi],
set_L.begin() + a_start); // Copy bi to ai
local_queues_sizes[ai] = local_queues_sizes[bi];
local_queues_sizes[bi] = 0;
continue;
}
if ((group_size - 1) != ai % 4) {
merge_two_queues_into_1st_queue_seq_incr(
set_L,
a_start,
local_queues_sizes[ai],
local_queue_capacity,
set_L,
b_start,
local_queues_sizes[bi]);
} else if (num_queues - 1 != ai) {
merge_two_queues_into_1st_queue_seq_incr(
set_L,
a_start,
local_queues_sizes[ai],
local_master_queue_capacity,
set_L,
b_start,
local_queues_sizes[bi]);
} else {
idi r = merge_two_queues_into_1st_queue_seq_incr(
set_L,
a_start,
local_queues_sizes[ai],
master_queue_capacity,
set_L,
b_start,
local_queues_sizes[bi]);
if (ai == num_queues - 1 && r < nk) {
nk = r;
}
}
local_queues_sizes[bi] = 0;
}
num_t >>= 1;
}
// Reset local_queues_sizes
// Do not do this for the Collector Idea or the Selecting Idea
// std::fill(local_queues_sizes.begin(), local_queues_sizes.end() - 1, 0);
// std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0);
return nk;
}
/*
* Function: distribute the master queue's top-M unchecked elements to top_m_candidates.
* Used by the hierarchical merging idea.
*/
inline idi Searching::master_top_m_to_groups(
std::vector<Candidate> &set_L,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
std::vector<idi> &top_m_candidates,
const std::vector<idi> &top_m_candidates_starts,
std::vector<idi> &top_m_candidates_sizes,
const idi k_uc,
idi &last_k,
const idi M,
const idi num_groups)
// const idi group_size)
{
const idi last_queue_start = local_queues_starts[num_threads_ - 1];
idi c_i_start = k_uc + last_queue_start;
idi c_i_bound = last_queue_start + local_queues_sizes[num_threads_ - 1];
idi top_m_count = 0;
for (idi c_i = c_i_start; c_i < c_i_bound && top_m_count < M; ++c_i) {
if (set_L[c_i].is_checked_) {
continue;
}
last_k = c_i - last_queue_start;
set_L[c_i].is_checked_ = true;
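// Round-robin candidate distribution across groups; the bitmask form of
// the modulo assumes num_groups is a power of two.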
idi g_i = top_m_count & (num_groups - 1);
// idi g_i = top_m_count % num_groups;
++top_m_count;
top_m_candidates[top_m_candidates_starts[g_i] + top_m_candidates_sizes[g_i]++] = set_L[c_i].id_;
}
return top_m_count;
}
/*
* 6/22/2020-21:30
* Search on the local portion of set_L.
* The local set_L is already sorted, and is_visited is already set up.
*/
inline void Searching::subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation)
{
const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi iter = 0;
idi M = 1; // value of M
while (k < local_L) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
local_L,
set_L,
set_L_start,
set_L_size,
local_top_m_candidates,
is_visited,
local_count_distance_computation);
{// Scale M
if (M < value_M_max) {
M <<= 1;
}
// else {
// M = value_M_max;
// }
}
}
// {//test
// printf("set_L_start: %u "
// "local_count_distance_computation: %lu\n",
// set_L_start,
// local_count_distance_computation);
// }
}
/*
* 7/6/2020-23:17
* Do only one subsearch iteration using top-M.
*/
inline void Searching::subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation)
{
// Select M candidates
idi top_m_candidates_end = 0;
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs an early break on the 2nd condition.
for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
idi index_set_L = c_i + set_L_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
}
idi nk = L;
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
idi cand_id = top_m_candidates[c_i];
// Capture the lowest insertion location so k_uc can jump back to
// re-examine improved candidates, as the parallel variants below do.
idi r = expand_one_candidate(
cand_id,
query_data,
set_L[set_L_size - 1 + set_L_start].distance_,
set_L,
set_L_start,
set_L_size,
L,
is_visited,
count_distance_computation);
if (r < nk) {
nk = r;
}
}
// top_m_candidates_end = 0; // Clear top_m_candidates
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
}
/*
* 7/31/2020-12:48
* Used for profiling. Sequential Double-M.
*/
inline void Searching::seq_search_with_top_m_double_m(
const idi M_max,
const idi query_id,
const idi K,
const idi global_L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K)
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
{
// time_initialization_ -= WallTimer::get_time_mark();
std::vector<idi> top_m_candidates(M_max);
boost::dynamic_bitset<> is_visited(num_v_);
uint64_t tmp_count_computation = 0;
idi set_L_size;
{// Initialization
// is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
for (idi c_i = 0; c_i < global_L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
for (idi v_i = 0; v_i < global_L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
// Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
for (idi id_i = 0; id_i < global_L; ++id_i) {
idi v_id = init_ids[id_i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
}
set_L_size = global_L;
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
std::sort(set_L.begin(), set_L.begin() + global_L);
}
// time_initialization_ += WallTimer::get_time_mark();
// Searching
subsearch_with_top_m(
M_max,
query_id,
global_L,
set_L,
0,
set_L_size,
top_m_candidates,
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// time_merge_ -= WallTimer::get_time_mark();
// time_ending_ -= WallTimer::get_time_mark();
// time_merge_ += WallTimer::get_time_mark();
{
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i].id_;
// set_K[k_i] = set_L[k_i].id_;
}
}
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
// }
// time_ending_ += WallTimer::get_time_mark();
// {//test
// if (3 == query_id) {
// exit(1);
// }
// }
}
/*
* 8/6/2020-21:08
* The same procedure as Middle-M, but does hierarchical merging to reduce the
* merging frequency. Right now there are only 3 levels (1 middle level), and
* 4 workers form a group.
*/
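/*
* Queue layout used below (group_size should be 4): each group of workers
* owns 4 queues; merge_queues_of_four folds a group's queues into its 4th
* queue (the group-local master), and merge_all_queues_to_master folds the
* group masters into the global master, the very last queue, from which
* the next batch of top-M candidates is drawn.
*/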
inline void Searching::para_search_with_top_m_hierarchy_merge_v1(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_capacity, // Maximum size of local queue
const idi local_master_queue_capacity, // Maximum size of local master queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
// std::vector< std::vector<idi> > &top_m_candidates_list, // every group has one top-M queue
std::vector<idi> &top_m_candidates,
const std::vector<idi> &top_m_candidates_starts,
std::vector<idi> &top_m_candidates_sizes,
// std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
const idi group_size, // Should be 4
const idi full_merge_freq)
{
// time_initialization_ -= WallTimer::get_time_mark();
// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
const idi master_queue_start = local_queues_starts[num_threads_ - 1];
const idi num_groups = (num_threads_ - 1) / group_size + 1; // 4 workers per group.
const dataf *query_data = queries_load_ + query_id * dimension_;
// Initialization Phase
{
//#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// Get the distances of all candidates, store in the set set_L.
uint64_t tmp_count_computation = 0;
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i + master_queue_start] = Candidate(v_id, dist, false); // False means not checked.
}
count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
std::sort(
set_L.begin() + master_queue_start,
set_L.begin() + master_queue_start + L);
local_queues_sizes[num_threads_ - 1] = L;
} // Initialization Phase
// time_initialization_ += WallTimer::get_time_mark();
// idi top_m_candidates_end = 0;
idi iter = 0; // for debug
idi M = 1;
idi k = 0; // Index of first unchecked candidate.
// Sequential Phase
{
uint64_t tmp_count_computation = 0;
while (k < L && M < value_M_middle) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
L,
set_L,
master_queue_start,
local_queues_sizes[num_threads_ - 1],
top_m_candidates,
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
threads_computations_[0] += tmp_count_computation;
tmp_count_computation = 0;
if (threads_computations_[0] >= thread_compuation_quota_) {
break;
}
{// Double M
if (M < value_M_max) {
M <<= 1U;
}
}
}
} // Sequential Phase
// if (M < static_cast<idi>(num_threads_)) {
// M = num_threads_;
// }
// Divide computation cost from thread 0 to others
{
// printf("threads_computations_[0]: %lu\n",
// threads_computations_[0]);
std::fill(
threads_computations_.begin(),
threads_computations_.end(),
threads_computations_[0] / num_threads_);
}
// Parallel Phase
idi para_iter = 0;
// if (true) {
if (num_threads_ <= 4) {
idi top_m_candidates_size = 0;
idi last_k;
idi nk;
uint64_t tmp_count_computation = 0;
while (true) {
// while (k < L) {
++iter;
// {//test
// printf("query_id: %u "
// "iter: %u \n",
// query_id,
// iter);
// }
last_k = L;
// Pick top-M
for (idi c_i = k; c_i < L && top_m_candidates_size < M; ++c_i) {
idi index_set_L = c_i + master_queue_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_size++] = set_L[index_set_L].id_;
}
if (!top_m_candidates_size) {
break;
}
// time_pick_top_m_ += WallTimer::get_time_mark();
nk = L;
// Push M candidates' neighbors into the queue.
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) {
int tid = omp_get_thread_num();
// Computation quota
if (threads_computations_[tid] >= thread_compuation_quota_) {
continue;
}
uint64_t tmp_last_count_computation = tmp_count_computation;
idi local_queue_start = local_queues_starts[tid];
idi &local_queue_size = local_queues_sizes[tid];
idi cand_id = top_m_candidates[c_i];
if (num_threads_ - 1 != tid) {
expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
local_queue_capacity,
is_visited,
tmp_count_computation);
} else {
idi r = expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
L,
is_visited,
tmp_count_computation);
if (r < nk) {
nk = r;
}
}
threads_computations_[tid] += tmp_count_computation - tmp_last_count_computation;
}
top_m_candidates_size = 0; // Clear top_m_candidates
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// // Merge. Merge all queues in parallel.
{
// time_merge_ -= WallTimer::get_time_mark();
if (num_threads_ > 1) {
time_full_merge_ -= WallTimer::get_time_mark();
idi r = merge_all_queues_para_array(
set_L,
local_queues_sizes,
local_queue_capacity,
L);
if (r < nk) {
nk = r;
}
time_full_merge_ += WallTimer::get_time_mark();
}
}
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1U;
}
}
}
} else { // More than 4 threads: grouped (hierarchical) search
idi tmp_iter_bound = 9;
bool is_finished = false;
bool is_full_merged = true;
idi M_group;
std::vector<idi> ks(num_groups, 0);
ks[num_groups - 1] = k;
std::vector<idi> nks(num_groups);
std::vector<idi> last_ks(num_groups);
uint64_t tmp_count_distance_computation = 0;
// bool is_finished = false;
while (!is_finished) {
++para_iter;
++iter;
M_group = M / num_groups;
is_finished = true;
auto s = std::chrono::high_resolution_clock::now();
if (1 == para_iter || (para_iter - 1) % full_merge_freq) {
// Initialize every group's top-M candidates from the global Master queue
// time_move_top_m_ -= WallTimer::get_time_mark();
// master_top_m_to_groups(
// set_L,
// local_queues_starts,
// local_queues_sizes,
// top_m_candidates,
// top_m_candidates_starts,
// top_m_candidates_sizes,
// ks[num_groups - 1],
// last_ks[num_groups - 1],
// M,
// num_groups);
// time_move_top_m_ += WallTimer::get_time_mark();
std::fill(top_m_candidates_sizes.begin(), top_m_candidates_sizes.end(), --tmp_iter_bound);
}
auto e = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = e - s;
time_move_top_m_ += diff.count();
// {//test
// printf("query_id: %u "
// "para_iter: %u "
// "iter: %u "
// "M_group: %u "
// "top_m_c_sizes: %u %u\n",
// query_id,
// para_iter,
// iter,
// M_group,
// top_m_candidates_sizes[0], top_m_candidates_sizes[1]);
// }
//#pragma omp parallel for num_threads(num_groups) \
// reduction(+ : tmp_count_distance_computation)
#pragma omp parallel for reduction(+ : tmp_count_distance_computation)
for (idi g_i = 0; g_i < num_groups; ++g_i) {
const idi local_master_queue_id = g_i * group_size + group_size - 1;
const idi local_master_queue_start = local_queues_starts[local_master_queue_id];
idi &local_master_queue_size = local_queues_sizes[local_master_queue_id];
idi &k_uc = ks[g_i];
const idi top_m_candidates_start = top_m_candidates_starts[g_i];
idi &top_m_candidates_size = top_m_candidates_sizes[g_i];
idi &last_k = last_ks[g_i];
// Pick top-M
// if (1 != para_iter && 0 == (para_iter - 1) % full_merge_freq) {
//// if ((para_iter - 1) % full_merge_freq) {
// last_k = L;
// for (idi c_i = k_uc; c_i < local_master_queue_size && top_m_candidates_size < M_group; ++c_i) {
// idi index_set_L = c_i + local_master_queue_start;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_start + top_m_candidates_size++] = set_L[index_set_L].id_;
// }
// }
if (!top_m_candidates_size) {
continue;
}
is_finished = false;
idi &nk = nks[g_i];
nk = L;
idi c_i_start = top_m_candidates_starts[g_i];
idi c_i_bound = c_i_start + top_m_candidates_size;
uint64_t tmp_count_distance_computation_ig = 0;
// Expand top-M
//#pragma omp parallel for num_threads(group_size) \
// reduction(+ : tmp_count_distance_computation_ig)
#pragma omp parallel for reduction(+ : tmp_count_distance_computation_ig)
for (idi c_i = c_i_start; c_i < c_i_bound; ++c_i) {
idi tid_ig = omp_get_thread_num();
// idi tid_ig = (c_i - c_i_start) % group_size;
idi q_id = g_i * group_size + tid_ig;
if (threads_computations_[q_id] >= thread_compuation_quota_) {
continue;
}
// uint64_t tmp_last_count_computation_ig = tmp_count_distance_computation_ig;
// idi cand_id = top_m_candidates[c_i];
//// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
//// for (idi n_i = 0; n_i < out_degree; ++n_i) {
//// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_distance_computation_ig;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (local_master_queue_size == local_master_queue_capacity
// && dist > set_L[local_master_queue_size - 1 + local_master_queue_start].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
//
// if (0 != tid_ig) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// local_queues_starts[q_id - 1],
// local_queues_sizes[q_id - 1],
// local_queue_capacity,
// cand);
// } else if (num_groups - 1 != g_i) {
// // Thread 0 but not the last group maintains the local master queue
// idi r = add_into_queue(
// set_L,
// local_master_queue_start,
// local_master_queue_size,
// local_master_queue_capacity,
// cand);
// if (r < nk) {
// nk = r;
// }
// } else {
// // Thread 0 and the last group maintains the master queue
// idi r = add_into_queue(
// set_L,
// local_master_queue_start,
// local_master_queue_size,
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// threads_computations_[q_id] += tmp_count_distance_computation_ig - tmp_last_count_computation_ig;
} // Expand in a group
tmp_count_distance_computation += tmp_count_distance_computation_ig;
top_m_candidates_size = 0;
// Merge in a group
// if (0 == (para_iter % full_merge_freq)) {
// idi r;
// if (num_groups - 1 != g_i) {
// // Normal group
// r = merge_queues_of_four(
// set_L,
// local_queues_starts,
// local_queues_sizes,
// g_i,
// local_queue_capacity,
// local_master_queue_capacity);
// } else {
// // The group contains the master queue
// r = merge_queues_of_four(
// set_L,
// local_queues_starts,
// local_queues_sizes,
// g_i,
// local_queue_capacity,
// L);
// }
// if (r < nk) {
// nk = r;
// }
// if (nk <= last_k) {
// k_uc = nk;
// } else {
// k_uc = last_k + 1;
// }
// }
} // Middle Level Parallelism
count_distance_computation_ += tmp_count_distance_computation;
tmp_count_distance_computation = 0;
// Do full merge and distribute
if (!is_finished && para_iter % full_merge_freq) {
// Full merge
time_full_merge_ -= WallTimer::get_time_mark();
++count_full_merge_;
idi r = merge_all_queues_to_master(
set_L,
local_queues_starts,
local_queues_sizes,
local_queue_capacity,
local_master_queue_capacity,
L,
group_size);
time_full_merge_ += WallTimer::get_time_mark();
// is_full_merged = true;
// idi &nk = nks[num_groups - 1];
// idi &k_uc = ks[num_groups - 1];
// idi &last_k = last_ks[num_groups - 1];
// if (r < nk) {
// nk = r;
// }
// if (nk <= last_k) {
// k_uc = nk;
// } else {
// k_uc = last_k + 1;
// }
} else {
is_full_merged = false;
}
{// Scale M
if (M < value_M_max) {
M <<= 1U;
}
}
} // Iteration
// if (!is_full_merged) {
// merge_all_queues_to_master(
// set_L,
// local_queues_sizes,
// local_queues_sizes,
// local_queue_capacity,
// local_master_queue_capacity,
// L,
// group_size);
// }
}
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i + master_queue_start].id_;
// set_K[k_i] = set_L[k_i].id_;
}
{// Reset
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
// is_visited.clear_all();
// std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0);
std::fill(threads_computations_.begin(), threads_computations_.end(), 0);
}
// {//test
// if (3 == query_id) {
// exit(1);
// }
// }
}
/*
* 8/8/2020-12:58
* A group of 4 workers does the intermediate-level search for one iteration.
* This is used for the hierarchical search with parallel SECTIONS.
*/
inline void Searching::group_search_for_one_iteration(
const idi g_i,
const dataf *query_data,
const idi M_group,
const idi L,
std::vector<Candidate> &set_L,
const idi local_queue_capacity, // Maximum size of local queue
const idi local_master_queue_capacity, // Maximum size of local master queue
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
std::vector<idi> &top_m_candidates,
const idi top_m_candidates_start,
idi &top_m_candidates_size,
boost::dynamic_bitset<> &is_visited,
idi &k_uc,
idi &last_k,
idi &nk,
const idi para_iter,
const idi group_size, // Should be 4
const idi num_groups,
const idi full_merge_freq,
bool &is_finished,
uint64_t &group_distance_computation)
{
const idi local_master_queue_id = g_i * group_size + group_size - 1;
const idi local_master_queue_start = local_queues_starts[local_master_queue_id];
idi &local_master_queue_size = local_queues_sizes[local_master_queue_id];
// idi &k_uc = ks[g_i];
// const idi top_m_candidates_start = top_m_candidates_starts[g_i];
// idi &top_m_candidates_size = top_m_candidates_sizes[g_i];
// idi &last_k = last_ks[g_i];
// // Pick top-M
// if (1 != para_iter && 0 == (para_iter - 1) % full_merge_freq) {
// last_k = L;
// for (idi c_i = k_uc; c_i < local_master_queue_size && top_m_candidates_size < M_group; ++c_i) {
// idi index_set_L = c_i + local_master_queue_start;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_start + top_m_candidates_size++] = set_L[index_set_L].id_;
// }
// }
if (!top_m_candidates_size) {
return;
}
is_finished = false;
// if (!top_m_candidates_size) {
// continue;
// }
// idi &nk = nks[g_i];
nk = L;
idi c_i_start = top_m_candidates_start;
idi c_i_bound = c_i_start + top_m_candidates_size;
// uint64_t tmp_count_distance_computation_ig = 0;
// Expand top-M
//#pragma omp parallel for num_threads(group_size) \
// reduction(+ : group_distance_computation)
#pragma omp parallel for reduction(+ : group_distance_computation)
for (idi c_i = c_i_start; c_i < c_i_bound; ++c_i) {
idi tid_ig = omp_get_thread_num();
idi q_id = g_i * group_size + tid_ig;
if (threads_computations_[q_id] >= thread_compuation_quota_) {
continue;
}
//
//// uint64_t tmp_last_count_computation_ig = tmp_count_distance_computation_ig;
// uint64_t tmp_last_distance_computation = group_distance_computation;
// idi cand_id = top_m_candidates[c_i];
//// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
//// for (idi n_i = 0; n_i < out_degree; ++n_i) {
//// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++group_distance_computation;
//// ++tmp_count_distance_computation_ig;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (local_master_queue_size == local_master_queue_capacity
// && dist > set_L[local_master_queue_size - 1 + local_master_queue_start].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
//
// if (0 != tid_ig) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// local_queues_starts[q_id - 1],
// local_queues_sizes[q_id - 1],
// local_queue_capacity,
// cand);
// } else if (num_groups - 1 != g_i) {
// // Thread 0 but not the last group maintains the local master queue
// idi r = add_into_queue(
// set_L,
// local_master_queue_start,
// local_master_queue_size,
// local_master_queue_capacity,
// cand);
// if (r < nk) {
// nk = r;
// }
// } else {
// // Thread 0 and the last group maintains the master queue
// idi r = add_into_queue(
// set_L,
// local_master_queue_start,
// local_master_queue_size,
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// threads_computations_[q_id] += group_distance_computation - tmp_last_distance_computation;
}
}
/*
* 8/7/2020-21:16
* Use parallel SECTIONS rather than a parallel for-loop over groups, to see whether the scheduling overhead changes.
*/
inline void Searching::para_search_with_top_m_hierarchy_merge_v2(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_capacity, // Maximum size of local queue
const idi local_master_queue_capacity, // Maximum size of local master queue
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
std::vector<idi> &top_m_candidates,
const std::vector<idi> &top_m_candidates_starts,
std::vector<idi> &top_m_candidates_sizes,
boost::dynamic_bitset<> &is_visited,
const idi group_size, // Should be 4
const idi full_merge_freq)
{
// time_initialization_ -= WallTimer::get_time_mark();
// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
const idi master_queue_start = local_queues_starts[num_threads_ - 1];
const idi num_groups = (num_threads_ - 1) / group_size + 1; // Number of groups, with group_size workers per group.
const dataf *query_data = queries_load_ + query_id * dimension_;
// Initialization Phase
{
//#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// Get the distances of all candidates, store in the set set_L.
uint64_t tmp_count_computation = 0;
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i + master_queue_start] = Candidate(v_id, dist, false); // False means not checked.
}
count_distance_computation_ += tmp_count_computation;
threads_computations_[0] += tmp_count_computation;
// tmp_count_computation = 0;
std::sort(
set_L.begin() + master_queue_start,
set_L.begin() + master_queue_start + L);
local_queues_sizes[num_threads_ - 1] = L;
} // Initialization Phase
// time_initialization_ += WallTimer::get_time_mark();
// idi top_m_candidates_end = 0;
idi iter = 0; // for debug
idi M = 1;
idi k = 0; // Index of first unchecked candidate.
// Sequential Phase
{
uint64_t tmp_count_computation = 0;
while (k < L && M < value_M_middle) {
++iter;
if (threads_computations_[0] >= thread_compuation_quota_) {
break;
}
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
L,
set_L,
master_queue_start,
local_queues_sizes[num_threads_ - 1],
top_m_candidates,
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
threads_computations_[0] += tmp_count_computation;
tmp_count_computation = 0;
{// Double M
if (M < value_M_max) {
M <<= 1U;
}
}
}
} // Sequential Phase
// if (M < static_cast<idi>(num_threads_)) {
// M = num_threads_;
// }
// Divide computation cost from thread 0 to others
{
// printf("threads_computations_[0]: %lu\n",
// threads_computations_[0]);
std::fill(
threads_computations_.begin(),
threads_computations_.end(),
threads_computations_[0] * (1.0 / num_threads_));
}
// Parallel Phase
idi para_iter = 0;
// if (true) {
if (num_threads_ <= 4) {
idi top_m_candidates_size = 0;
idi last_k;
idi nk;
uint64_t tmp_count_computation = 0;
while (true) {
// while (k < L) {
++iter;
// {//test
// printf("query_id: %u "
// "iter: %u \n",
// query_id,
// iter);
// }
last_k = L;
// Pick top-M
for (idi c_i = k; c_i < L && top_m_candidates_size < M; ++c_i) {
idi index_set_L = c_i + master_queue_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_size++] = set_L[index_set_L].id_;
}
if (!top_m_candidates_size) {
break;
}
// time_pick_top_m_ += WallTimer::get_time_mark();
nk = L;
// Push M candidates' neighbors into the queue.
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) {
int tid = omp_get_thread_num();
// Computation quota
if (threads_computations_[tid] >= thread_compuation_quota_) {
continue;
}
uint64_t tmp_last_count_computation = tmp_count_computation;
idi local_queue_start = local_queues_starts[tid];
idi &local_queue_size = local_queues_sizes[tid];
idi cand_id = top_m_candidates[c_i];
if (num_threads_ - 1 != tid) {
expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
local_queue_capacity,
is_visited,
tmp_count_computation);
} else {
idi r = expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
L,
is_visited,
tmp_count_computation);
if (r < nk) {
nk = r;
}
}
threads_computations_[tid] += tmp_count_computation - tmp_last_count_computation;
}
top_m_candidates_size = 0; // Clear top_m_candidates
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// // Merge. Merge all queues in parallel.
{
// time_merge_ -= WallTimer::get_time_mark();
if (num_threads_ > 1) {
time_full_merge_ -= WallTimer::get_time_mark();
idi r = merge_all_queues_para_array(
set_L,
local_queues_sizes,
local_queue_capacity,
L);
if (r < nk) {
nk = r;
}
time_full_merge_ += WallTimer::get_time_mark();
}
}
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1U;
}
}
}
} else { // More than 4 threads: grouped (hierarchical) search
idi tmp_iter_bound = 9;
bool is_finished = false;
bool is_full_merged = true;
idi M_group;
std::vector<idi> ks(num_groups, 0);
ks[num_groups - 1] = k;
std::vector<idi> nks(num_groups);
std::vector<idi> last_ks(num_groups);
uint64_t tmp_count_distance_computation = 0;
while (!is_finished) {
++para_iter;
++iter;
M_group = M / num_groups;
is_finished = true;
auto s = std::chrono::high_resolution_clock::now();
if (1 == para_iter || (para_iter - 1) % full_merge_freq) {
// Initialize every group's top-M candidates from the global Master queue
// time_move_top_m_ -= WallTimer::get_time_mark();
// master_top_m_to_groups(
// set_L,
// local_queues_starts,
// local_queues_sizes,
// top_m_candidates,
// top_m_candidates_starts,
// top_m_candidates_sizes,
// ks[num_groups - 1],
// last_ks[num_groups - 1],
// M,
// num_groups);
// time_move_top_m_ += WallTimer::get_time_mark();
std::fill(top_m_candidates_sizes.begin(), top_m_candidates_sizes.end(), --tmp_iter_bound);
}
auto e = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = e - s;
time_move_top_m_ += diff.count();
// {//test
// printf("query_id: %u "
// "para_iter: %u "
// "iter: %u "
// "M_group: %u "
// "top_m_c_sizes: %u %u\n",
// query_id,
// para_iter,
// iter,
// M_group,
// top_m_candidates_sizes[0], top_m_candidates_sizes[1]);
// }
//#pragma omp parallel sections num_threads(2) \
// reduction(+ : tmp_count_distance_computation)
#pragma omp parallel sections reduction(+ : tmp_count_distance_computation)
{
#pragma omp section
{
idi g_i = 0;
idi &top_m_candidates_size = top_m_candidates_sizes[g_i];
idi &k_uc = ks[g_i];
idi &last_k = last_ks[g_i];
idi &nk = nks[g_i];
group_search_for_one_iteration(
g_i,
query_data,
M_group,
L,
set_L,
local_queue_capacity, // Maximum size of local queue
local_master_queue_capacity, // Maximum size of local master queue
local_queues_starts,
local_queues_sizes, // Sizes of local queue
top_m_candidates,
top_m_candidates_starts[g_i],
top_m_candidates_size,
is_visited,
ks[g_i],
last_ks[g_i],
nks[g_i],
para_iter,
group_size, // Should be 4
num_groups,
full_merge_freq,
is_finished,
tmp_count_distance_computation);
top_m_candidates_size = 0;
// // Merge in a group
// if (0 == (para_iter % full_merge_freq)) {
// idi r;
// if (num_groups - 1 != g_i) {
// // Normal group
// r = merge_queues_of_four(
// set_L,
// local_queues_starts,
// local_queues_sizes,
// g_i,
// local_queue_capacity,
// local_master_queue_capacity);
// } else {
// // The group contains the master queue
// r = merge_queues_of_four(
// set_L,
// local_queues_starts,
// local_queues_sizes,
// g_i,
// local_queue_capacity,
// L);
// }
// if (r < nk) {
// nk = r;
// }
// if (nk <= last_k) {
// k_uc = nk;
// } else {
// k_uc = last_k + 1;
// }
// }
}
#pragma omp section
{
idi g_i = 1;
idi &top_m_candidates_size = top_m_candidates_sizes[g_i];
idi &k_uc = ks[g_i];
idi &last_k = last_ks[g_i];
idi &nk = nks[g_i];
group_search_for_one_iteration(
g_i,
query_data,
M_group,
L,
set_L,
local_queue_capacity, // Maximum size of local queue
local_master_queue_capacity, // Maximum size of local master queue
local_queues_starts,
local_queues_sizes, // Sizes of local queue
top_m_candidates,
top_m_candidates_starts[g_i],
top_m_candidates_size,
is_visited,
ks[g_i],
last_ks[g_i],
nks[g_i],
para_iter,
group_size, // Should be 4
num_groups,
full_merge_freq,
is_finished,
tmp_count_distance_computation);
top_m_candidates_size = 0;
// // Merge in a group
// if (0 == (para_iter % full_merge_freq)) {
// idi r;
// if (num_groups - 1 != g_i) {
// // Normal group
// r = merge_queues_of_four(
// set_L,
// local_queues_starts,
// local_queues_sizes,
// g_i,
// local_queue_capacity,
// local_master_queue_capacity);
// } else {
// // The group contains the master queue
// r = merge_queues_of_four(
// set_L,
// local_queues_starts,
// local_queues_sizes,
// g_i,
// local_queue_capacity,
// L);
// }
// if (r < nk) {
// nk = r;
// }
// if (nk <= last_k) {
// k_uc = nk;
// } else {
// k_uc = last_k + 1;
// }
// }
}
}
count_distance_computation_ += tmp_count_distance_computation;
tmp_count_distance_computation = 0;
// Do full merge and distribute
if (!is_finished && para_iter % full_merge_freq) {
// Full merge
time_full_merge_ -= WallTimer::get_time_mark();
++count_full_merge_;
idi r = merge_all_queues_to_master(
set_L,
local_queues_starts,
local_queues_sizes,
local_queue_capacity,
local_master_queue_capacity,
L,
group_size);
time_full_merge_ += WallTimer::get_time_mark();
// is_full_merged = true;
// idi &nk = nks[num_groups - 1];
// idi &k_uc = ks[num_groups - 1];
// idi &last_k = last_ks[num_groups - 1];
// if (r < nk) {
// nk = r;
// }
// if (nk <= last_k) {
// k_uc = nk;
// } else {
// k_uc = last_k + 1;
// }
} else {
is_full_merged = false;
}
{// Scale M
if (M < value_M_max) {
M <<= 1U;
}
}
} // Iteration
// if (!is_full_merged) {
// merge_all_queues_to_master(
// set_L,
// local_queues_sizes,
// local_queues_sizes,
// local_queue_capacity,
// local_master_queue_capacity,
// L,
// group_size);
// }
}
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i + master_queue_start].id_;
// set_K[k_i] = set_L[k_i].id_;
}
{// Reset
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
// is_visited.clear_all();
// std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0);
std::fill(threads_computations_.begin(), threads_computations_.end(), 0);
}
// {//test
// if (3 == query_id) {
// exit(1);
// }
// }
}
/*
* Expand a candidate by visiting its neighbors.
* Returns the lowest queue location at which a neighbor was added.
*/
inline idi Searching::expand_one_candidate(
idi cand_id,
const dataf *query_data,
const distf &dist_bound,
std::vector<Candidate> &set_L,
const idi local_queue_start,
idi &local_queue_size,
const idi &local_queue_capacity,
boost::dynamic_bitset<> &is_visited,
// const idi nk_init,
uint64_t &local_count_computation)
{
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// tmp_time_pick_top_m += WallTimer::get_time_mark();
idi nk = local_queue_capacity;
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++local_count_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
if (dist > dist_bound) {
// if (dist > set_L[L - 1 + master_queue_start].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// Add to the local queue.
idi r = add_into_queue(
set_L,
local_queue_start,
local_queue_size,
local_queue_capacity,
cand);
if (r < nk) {
nk = r;
}
}
return nk;
}
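// Usage sketch (hypothetical caller with a single queue of capacity L):
// the return value is the lowest index at which a neighbor was inserted,
// so the caller can resume its scan from there instead of from 0.
// idi r = expand_one_candidate(cand_id, query_data,
//         set_L[L - 1].distance_, set_L, /*queue start*/0, queue_size, L,
//         is_visited, count_computation);
// k = (r <= last_k) ? r : last_k + 1;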
/*
* 8/6/2020-11:58
* Based on Middle-4, but with a reduced full-merge frequency.
* Strictly speaking, this is local searching rather than less synchronization.
*/
inline void Searching::para_search_with_top_m_less_sync_v0(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_capacity, // Maximum size of local queue
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
const idi full_merge_freq,
const idi local_iter_bound)
{
const idi master_queue_start = local_queues_starts[num_threads_ - 1];
const dataf *query_data = queries_load_ + query_id * dimension_;
// Initialization Phase
{
//#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// Get the distances of all candidates, store in the set set_L.
uint64_t tmp_count_computation = 0;
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i + master_queue_start] = Candidate(v_id, dist, false); // False means not checked.
}
count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
std::sort(
set_L.begin() + master_queue_start,
set_L.begin() + master_queue_start + L);
local_queues_sizes[num_threads_ - 1] = L;
} // Initialization Phase
idi iter = 0; // for debug
idi M = 1;
idi k = 0; // Index of first unchecked candidate.
// Sequential Phase
{
uint64_t tmp_count_computation = 0;
while (k < L && M < value_M_middle) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
L,
set_L,
master_queue_start,
local_queues_sizes[num_threads_ - 1],
top_m_candidates,
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
{// Double M
if (M < value_M_max) {
M <<= 1U;
}
}
}
} // Sequential Phase
// if (M < static_cast<idi>(num_threads_)) {
// M = num_threads_;
// }
// Parallel Phase
idi para_iter = 0;
idi top_m_candidates_size = 0;
idi last_k;
idi nk;
uint64_t tmp_count_computation = 0;
while (true) {
// while (k < L) {
++para_iter;
++iter;
last_k = L;
// Pick top-M
for (idi c_i = k; c_i < L && top_m_candidates_size < M; ++c_i) {
idi index_set_L = c_i + master_queue_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_size++] = set_L[index_set_L].id_;
}
if (!top_m_candidates_size) {
break;
}
nk = L;
// Expand
//#pragma omp parallel for reduction(+ : tmp_count_computation)
#pragma omp parallel reduction(+ : tmp_count_computation)
{
#pragma omp for nowait
for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) {
int tid = omp_get_thread_num();
idi local_queue_start = local_queues_starts[tid];
idi &local_queue_size = local_queues_sizes[tid];
idi cand_id = top_m_candidates[c_i];
if (num_threads_ - 1 != tid) {
expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
local_queue_capacity,
is_visited,
tmp_count_computation);
} else {
idi r = expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
L,
is_visited,
tmp_count_computation);
if (r < nk) {
nk = r;
}
}
} // Expand
if (0 == (para_iter % full_merge_freq)) {
// Local search iterations
int q_i = omp_get_thread_num();
idi local_queue_start = local_queues_starts[q_i];
idi &local_queue_size = local_queues_sizes[q_i];
const idi queue_capacity = (num_threads_ - 1 != q_i) ? local_queue_capacity : L;
idi tmp_k;
if (num_threads_ - 1 != q_i) {
tmp_k = 0;
} else {
if (nk <= last_k) {
tmp_k = nk;
} else {
tmp_k = last_k + 1;
}
}
// if (tmp_k >= local_queue_size) {
// continue;
// }
idi i_t = 0;
idi cand_id;
while (tmp_k < local_queue_size) {
idi r;
if (!set_L[local_queue_start + tmp_k].is_checked_) {
set_L[local_queue_start + tmp_k].is_checked_ = true;
cand_id = set_L[local_queue_start + tmp_k].id_;
// Expand
r = expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
queue_capacity,
is_visited,
tmp_count_computation);
if (++i_t == local_iter_bound) {
break;
}
} else {
r = queue_capacity;
}
if (r <= tmp_k) {
tmp_k = r;
} else {
++tmp_k;
}
}
} // Local Search
} // OMP Parallel Construct
top_m_candidates_size = 0; // Clear top_m_candidates
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// // Merge. Merge all queues in parallel.
{
// time_merge_ -= WallTimer::get_time_mark();
if (num_threads_ > 1) {
idi r = merge_all_queues_para_array(
set_L,
local_queues_sizes,
local_queue_capacity,
L);
if (r < nk) {
nk = r;
}
}
}
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1U;
}
}
}
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i + master_queue_start].id_;
// set_K[k_i] = set_L[k_i].id_;
}
{// Reset
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
// is_visited.clear_all();
// std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0);
}
// {//test
// if (14 == query_id) {
// exit(1);
// }
// }
}
} // namespace PANNS
#endif //BATCH_SEARCHING_SEARCHING_H
|
effects.c | #define _POSIX_C_SOURCE 200809L
#include <omp.h>
#include <stdlib.h>
#include <stdbool.h>
#include <dlfcn.h>
#include <string.h>
#include <errno.h>
#include <sys/wait.h>
#include <unistd.h>
#include <spawn.h>
#include "effects.h"
#include "log.h"
#include <time.h>
#include <math.h> // for fmin()/fmax() used by effect_vignette()
// glib might or might not have already defined MIN,
// depending on whether we have pixbuf or not...
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
extern char **environ;
static int screen_size_to_pix(struct swaylock_effect_screen_pos size, int screensize) {
int actual = size.pos;
if (size.is_percent)
actual = (size.pos / 100.0) * screensize;
return actual;
}
static int screen_pos_to_pix(struct swaylock_effect_screen_pos pos, int screensize) {
int actual = pos.pos;
if (pos.is_percent)
actual = (pos.pos / 100.0) * screensize;
if (actual < 0)
actual = screensize + actual;
return actual;
}
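// Example: on a 1000px screen, a position of -10% first scales to -100,
// then wraps to 1000 + (-100) = 900, i.e. 100px in from the right edge.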
static void screen_pos_pair_to_pix(
struct swaylock_effect_screen_pos posx,
struct swaylock_effect_screen_pos posy,
int objwidth, int objheight,
int screenwidth, int screenheight, int gravity,
int *outx, int *outy) {
int x = screen_pos_to_pix(posx, screenwidth);
int y = screen_pos_to_pix(posy, screenheight);
// Adjust X
switch (gravity) {
case EFFECT_COMPOSE_GRAV_CENTER:
case EFFECT_COMPOSE_GRAV_N:
case EFFECT_COMPOSE_GRAV_S:
x -= objwidth / 2;
break;
case EFFECT_COMPOSE_GRAV_NW:
case EFFECT_COMPOSE_GRAV_SW:
case EFFECT_COMPOSE_GRAV_W:
break;
case EFFECT_COMPOSE_GRAV_NE:
case EFFECT_COMPOSE_GRAV_SE:
case EFFECT_COMPOSE_GRAV_E:
x -= objwidth;
break;
}
// Adjust Y
switch (gravity) {
case EFFECT_COMPOSE_GRAV_CENTER:
case EFFECT_COMPOSE_GRAV_W:
case EFFECT_COMPOSE_GRAV_E:
y -= objheight / 2;
break;
case EFFECT_COMPOSE_GRAV_NW:
case EFFECT_COMPOSE_GRAV_NE:
case EFFECT_COMPOSE_GRAV_N:
break;
case EFFECT_COMPOSE_GRAV_SW:
case EFFECT_COMPOSE_GRAV_SE:
case EFFECT_COMPOSE_GRAV_S:
y -= objheight;
break;
}
*outx = x;
*outy = y;
}
static uint32_t blend_pixels(float alpha, uint32_t srcpix, uint32_t destpix) {
uint8_t srcr = (srcpix & 0x00ff0000) >> 16;
uint8_t destr = (destpix & 0x00ff0000) >> 16;
uint8_t srcg = (srcpix & 0x0000ff00) >> 8;
uint8_t destg = (destpix & 0x0000ff00) >> 8;
uint8_t srcb = (srcpix & 0x000000ff) >> 0;
uint8_t destb = (destpix & 0x000000ff) >> 0;
return (uint32_t)0 |
(uint32_t)255 << 24 |
(uint32_t)(srcr + destr * (1 - alpha)) << 16 |
(uint32_t)(srcg + destg * (1 - alpha)) << 8 |
(uint32_t)(srcb + destb * (1 - alpha)) << 0;
}
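// Note: srcpix is assumed to carry premultiplied alpha (as in cairo's
// CAIRO_FORMAT_ARGB32), which is why the source channels are added without
// an extra alpha factor. E.g. 50%-opaque red over black: premultiplied
// srcr = 0x80, destr = 0x00, so the result's red is 0x80 + 0 * (1 - 0.5) = 0x80.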
static void blur_h(uint32_t *dest, uint32_t *src, int width, int height,
int radius) {
const int minradius = radius < width ? radius : width;
#pragma omp parallel for
for (int y = 0; y < height; ++y) {
uint32_t *srow = src + y * width;
uint32_t *drow = dest + y * width;
// 'range' is float, because floating point division is usually faster
// than integer division.
int r_acc = 0;
int g_acc = 0;
int b_acc = 0;
float range = minradius;
// Accumulate the range (0..radius)
for (int x = 0; x < minradius; ++x) {
r_acc += (srow[x] & 0xff0000) >> 16;
g_acc += (srow[x] & 0x00ff00) >> 8;
b_acc += (srow[x] & 0x0000ff);
}
// Deal with the main body
for (int x = 0; x < width; ++x) {
if (x >= minradius) {
r_acc -= (srow[x - radius] & 0xff0000) >> 16;
g_acc -= (srow[x - radius] & 0x00ff00) >> 8;
b_acc -= (srow[x - radius] & 0x0000ff);
range -= 1;
}
if (x < width - minradius) {
r_acc += (srow[x + radius] & 0xff0000) >> 16;
g_acc += (srow[x + radius] & 0x00ff00) >> 8;
b_acc += (srow[x + radius] & 0x0000ff);
range += 1;
}
drow[x] = 0 |
(int)(r_acc / range) << 16 |
(int)(g_acc / range) << 8 |
(int)(b_acc / range);
}
}
}
static void blur_v(uint32_t *dest, uint32_t *src, int width, int height,
int radius) {
const int minradius = radius < height ? radius : height;
#pragma omp parallel for
for (int x = 0; x < width; ++x) {
uint32_t *scol = src + x;
uint32_t *dcol = dest + x;
// 'range' is float, because floating point division is usually faster
// than integer division.
int r_acc = 0;
int g_acc = 0;
int b_acc = 0;
float range = minradius;
// Accumulate the range (0..radius)
for (int y = 0; y < minradius; ++y) {
r_acc += (scol[y * width] & 0xff0000) >> 16;
g_acc += (scol[y * width] & 0x00ff00) >> 8;
b_acc += (scol[y * width] & 0x0000ff);
}
// Deal with the main body
for (int y = 0; y < height; ++y) {
if (y >= minradius) {
r_acc -= (scol[(y - radius) * width] & 0xff0000) >> 16;
g_acc -= (scol[(y - radius) * width] & 0x00ff00) >> 8;
b_acc -= (scol[(y - radius) * width] & 0x0000ff);
range -= 1;
}
if (y < height - minradius) {
r_acc += (scol[(y + radius) * width] & 0xff0000) >> 16;
g_acc += (scol[(y + radius) * width] & 0x00ff00) >> 8;
b_acc += (scol[(y + radius) * width] & 0x0000ff);
range += 1;
}
dcol[y * width] = 0 |
(int)(r_acc / range) << 16 |
(int)(g_acc / range) << 8 |
(int)(b_acc / range);
}
}
}
static void blur_once(uint32_t *dest, uint32_t *src, uint32_t *scratch,
int width, int height, int radius) {
blur_h(scratch, src, width, height, radius);
blur_v(dest, scratch, width, height, radius);
}
// This effect_blur function, and the associated blur_* functions,
// are my own adaptations of code in yvbbrjdr's i3lock-fancy-rapid:
// https://github.com/yvbbrjdr/i3lock-fancy-rapid
static void effect_blur(uint32_t *dest, uint32_t *src, int width, int height,
int radius, int times) {
uint32_t *origdest = dest;
uint32_t *scratch = malloc(width * height * sizeof(*scratch));
blur_once(dest, src, scratch, width, height, radius);
for (int i = 0; i < times - 1; ++i) {
uint32_t *tmp = src;
src = dest;
dest = tmp;
blur_once(dest, src, scratch, width, height, radius);
}
free(scratch);
// We're flipping between using dest and src;
// if the last buffer we used was src, copy that over to dest.
if (dest != origdest)
memcpy(origdest, dest, width * height * sizeof(*dest));
}
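// Each blur_once pass is a separable box blur (blur_h then blur_v) whose
// sliding-window accumulator makes the cost O(width * height) regardless of
// radius; repeating the pass approximates a Gaussian blur. A hypothetical
// call: effect_blur(dest, src, width, height, /*radius*/5, /*times*/3).
// Note that src is used as scratch space when times > 1, so it gets clobbered.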
static void effect_pixelate(uint32_t *data, int width, int height, int factor) {
#pragma omp parallel for
for (int y = 0; y < height / factor + 1; ++y) {
for (int x = 0; x < width / factor + 1; ++x) {
int total_r = 0, total_g = 0, total_b = 0;
int xstart = x * factor;
int ystart = y * factor;
int xlim = MIN(xstart + factor, width);
int ylim = MIN(ystart + factor, height);
// Average
for (int ry = ystart; ry < ylim; ++ry) {
for (int rx = xstart; rx < xlim; ++rx) {
int index = ry * width + rx;
total_r += (data[index] & 0xff0000) >> 16;
total_g += (data[index] & 0x00ff00) >> 8;
total_b += (data[index] & 0x0000ff);
}
}
// Divide by the actual block size: edge blocks can be smaller than factor x factor.
int count = (ylim - ystart) * (xlim - xstart);
if (count == 0) continue;
int r = total_r / count;
int g = total_g / count;
int b = total_b / count;
// Fill pixels
for (int ry = ystart; ry < ylim; ++ry) {
for (int rx = xstart; rx < xlim; ++rx) {
int index = ry * width + rx;
data[index] = r << 16 | g << 8 | b;
}
}
}
}
}
static void effect_scale(uint32_t *dest, uint32_t *src, int swidth, int sheight,
double scale) {
int dwidth = swidth * scale;
int dheight = sheight * scale;
double fact = 1.0 / scale;
#pragma omp parallel for
for (int dy = 0; dy < dheight; ++dy) {
int sy = dy * fact;
if (sy >= sheight) continue;
for (int dx = 0; dx < dwidth; ++dx) {
int sx = dx * fact;
if (sx >= swidth) continue;
dest[dy * dwidth + dx] = src[sy * swidth + sx];
}
}
}
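// This is nearest-neighbour sampling: every destination pixel maps back to
// a source pixel through fact = 1 / scale, so e.g. scale = 0.5 keeps every
// other row and column of the source.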
static void effect_greyscale(uint32_t *data, int width, int height) {
#pragma omp parallel for
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
int index = y * width + x;
int r = (data[index] & 0xff0000) >> 16;
int g = (data[index] & 0x00ff00) >> 8;
int b = (data[index] & 0x0000ff);
int luma = 0.2989 * r + 0.5870 * g + 0.1140 * b;
if (luma < 0) luma = 0;
if (luma > 255) luma = 255;
luma &= 0xFF;
data[index] = luma << 16 | luma << 8 | luma;
}
}
}
static void effect_vignette(uint32_t *data, int width, int height,
double base, double factor) {
base = fmin(1, fmax(0, base));
factor = fmin(1 - base, fmax(0, factor));
#pragma omp parallel for
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double xf = (x * 1.0) / width;
double yf = (y * 1.0) / height;
double vignette_factor = base + factor
* 16 * xf * yf * (1.0 - xf) * (1.0 - yf);
int index = y * width + x;
int r = (data[index] & 0xff0000) >> 16;
int g = (data[index] & 0x00ff00) >> 8;
int b = (data[index] & 0x0000ff);
r = (int)(r * vignette_factor) & 0xFF;
g = (int)(g * vignette_factor) & 0xFF;
b = (int)(b * vignette_factor) & 0xFF;
data[index] = r << 16 | g << 8 | b;
}
}
}
static void effect_compose(uint32_t *data, int width, int height,
struct swaylock_effect_screen_pos posx,
struct swaylock_effect_screen_pos posy,
struct swaylock_effect_screen_pos posw,
struct swaylock_effect_screen_pos posh,
int gravity, char *imgpath) {
#if !HAVE_GDK_PIXBUF
(void)&blend_pixels;
(void)&screen_size_to_pix;
(void)&screen_pos_pair_to_pix;
swaylock_log(LOG_ERROR, "Compose effect: Compiled without gdk_pixbuf support.\n");
return;
#else
int imgw = screen_size_to_pix(posw, width);
int imgh = screen_size_to_pix(posh, height);
bool preserve_aspect = imgw < 0 || imgh < 0;
GError *err = NULL;
GdkPixbuf *pixbuf = gdk_pixbuf_new_from_file_at_scale(
imgpath, imgw, imgh, preserve_aspect, &err);
if (!pixbuf) {
swaylock_log(LOG_ERROR, "Compose effect: Failed to load image file '%s' (%s).",
imgpath, err->message);
g_error_free(err);
return;
}
cairo_surface_t *image = gdk_cairo_image_surface_create_from_pixbuf(pixbuf);
g_object_unref(pixbuf);
int bufw = cairo_image_surface_get_width(image);
int bufh = cairo_image_surface_get_height(image);
uint32_t *bufdata = (uint32_t *)cairo_image_surface_get_data(image);
int bufstride = cairo_image_surface_get_stride(image) / 4;
bool bufalpha = cairo_image_surface_get_format(image) == CAIRO_FORMAT_ARGB32;
int imgx, imgy;
screen_pos_pair_to_pix(
posx, posy, bufw, bufh,
width, height, gravity,
&imgx, &imgy);
#pragma omp parallel for
for (int offy = 0; offy < bufh; ++offy) {
if (offy + imgy < 0 || offy + imgy >= height)
continue;
for (int offx = 0; offx < bufw; ++offx) {
if (offx + imgx < 0 || offx + imgx >= width)
continue;
size_t idx = (size_t)(offy + imgy) * width + (offx + imgx);
size_t bufidx = (size_t)offy * bufstride + (offx);
if (!bufalpha) {
data[idx] = bufdata[bufidx];
} else {
uint8_t alpha = (bufdata[bufidx] & 0xff000000) >> 24;
if (alpha == 255) {
data[idx] = bufdata[bufidx];
} else if (alpha != 0) {
data[idx] = blend_pixels(alpha / 255.0, bufdata[bufidx], data[idx]);
}
}
}
}
cairo_surface_destroy(image);
#endif
}
static void effect_custom(uint32_t *data, int width, int height,
char *path) {
void *dl = dlopen(path, RTLD_LAZY);
if (dl == NULL) {
swaylock_log(LOG_ERROR, "Custom effect: %s", dlerror());
return;
}
void (*effect_func)(uint32_t *data, int width, int height) =
dlsym(dl, "swaylock_effect");
if (effect_func != NULL) {
effect_func(data, width, height);
dlclose(dl);
return;
}
uint32_t (*pixel_func)(uint32_t pix, int x, int y, int width, int height) =
dlsym(dl, "swaylock_pixel");
if (pixel_func != NULL) {
#pragma omp parallel for
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
data[y * width + x] =
pixel_func(data[y * width + x], x, y, width, height);
}
}
dlclose(dl);
return;
}
swaylock_log(LOG_ERROR, "Custom effect: %s", dlerror());
}
cairo_surface_t *swaylock_effects_run(cairo_surface_t *surface,
struct swaylock_effect *effects, int count) {
for (int i = 0; i < count; ++i) {
struct swaylock_effect *effect = &effects[i];
switch (effect->tag) {
case EFFECT_BLUR: {
cairo_surface_t *surf = cairo_image_surface_create(
CAIRO_FORMAT_RGB24,
cairo_image_surface_get_width(surface),
cairo_image_surface_get_height(surface));
if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) {
swaylock_log(LOG_ERROR, "Failed to create surface for blur effect");
cairo_surface_destroy(surf);
break;
}
effect_blur(
(uint32_t *)cairo_image_surface_get_data(surf),
(uint32_t *)cairo_image_surface_get_data(surface),
cairo_image_surface_get_width(surface),
cairo_image_surface_get_height(surface),
effect->e.blur.radius, effect->e.blur.times);
cairo_surface_flush(surf);
cairo_surface_destroy(surface);
surface = surf;
break;
}
case EFFECT_PIXELATE: {
effect_pixelate(
(uint32_t *)cairo_image_surface_get_data(surface),
cairo_image_surface_get_width(surface),
cairo_image_surface_get_height(surface),
effect->e.pixelate.factor);
cairo_surface_flush(surface);
break;
}
case EFFECT_SCALE: {
cairo_surface_t *surf = cairo_image_surface_create(
CAIRO_FORMAT_RGB24,
cairo_image_surface_get_width(surface) * effect->e.scale,
cairo_image_surface_get_height(surface) * effect->e.scale);
if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) {
swaylock_log(LOG_ERROR, "Failed to create surface for scale effect");
cairo_surface_destroy(surf);
break;
}
effect_scale(
(uint32_t *)cairo_image_surface_get_data(surf),
(uint32_t *)cairo_image_surface_get_data(surface),
cairo_image_surface_get_width(surface),
cairo_image_surface_get_height(surface),
effect->e.scale);
cairo_surface_flush(surf);
cairo_surface_destroy(surface);
surface = surf;
break;
}
case EFFECT_GREYSCALE: {
effect_greyscale(
(uint32_t *)cairo_image_surface_get_data(surface),
cairo_image_surface_get_width(surface),
cairo_image_surface_get_height(surface));
cairo_surface_flush(surface);
break;
}
case EFFECT_VIGNETTE: {
effect_vignette(
(uint32_t *)cairo_image_surface_get_data(surface),
cairo_image_surface_get_width(surface),
cairo_image_surface_get_height(surface),
effect->e.vignette.base,
effect->e.vignette.factor);
cairo_surface_flush(surface);
break;
}
case EFFECT_COMPOSE: {
effect_compose(
(uint32_t *)cairo_image_surface_get_data(surface),
cairo_image_surface_get_width(surface),
cairo_image_surface_get_height(surface),
effect->e.compose.x, effect->e.compose.y,
effect->e.compose.w, effect->e.compose.h,
effect->e.compose.gravity, effect->e.compose.imgpath);
cairo_surface_flush(surface);
break;
}
case EFFECT_CUSTOM: {
effect_custom(
(uint32_t *)cairo_image_surface_get_data(surface),
cairo_image_surface_get_width(surface),
cairo_image_surface_get_height(surface),
effect->e.custom);
cairo_surface_flush(surface);
break;
} }
}
return surface;
}
|
schelude-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int i, n = 16, chunk, a[n], suma = 0;
if (argc < 2) {
fprintf(stderr, "\nMissing chunk argument\n");
exit(-1);
}
chunk = atoi(argv[1]);
for (i = 0; i < n; i++) a[i] = i;
#pragma omp parallel for firstprivate(suma) \
lastprivate(suma) schedule(static,chunk)
for (i = 0; i < n; i++)
{
suma = suma + a[i];
printf(" thread %d adds a[%d], suma=%d \n",
omp_get_thread_num(), i, suma);
}
printf("Outside 'parallel for': suma=%d\n", suma);
return 0;
}
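// What to expect (a sketch, assuming 4 threads and chunk = 4): firstprivate
// gives each thread a private suma starting at 0, so each thread sums only
// its own chunks; lastprivate then copies back the suma of the thread that
// ran the sequentially last iteration (i = 15), not the global total.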
|
GB_unaryop__identity_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint64_uint32
// op(A') function: GB_tran__identity_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint64_uint32
(
uint64_t *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
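// With the macros above expanded, the kernel body reduces to the scalar loop
//   for (p = 0 ; p < anz ; p++) { Cx [p] = (uint64_t) Ax [p] ; }
// split statically across nthreads.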
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gemv.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_OPS_ARM_FP16_GEMV_H_
#define MACE_OPS_ARM_FP16_GEMV_H_
#if defined(MACE_ENABLE_NEON) && \
defined(__ARM_FP16_FORMAT_IEEE) && (__ARM_FP & 2)
// TODO(lichao): replace it with global macro
#define MACE_ENABLE_FP16_NEON
#endif
#include "mace/core/types.h"
#if defined(MACE_ENABLE_NEON) && defined(__ANDROID__)
#include <arm_neon.h>
#endif
#if defined(MACE_ENABLE_NEON) && !defined(__aarch64__) && defined(__ANDROID__)
#define vaddvq_f32(v) ((v)[0] + (v)[1] + (v)[2] + (v)[3])
#endif
namespace mace {
namespace ops {
template<typename INPUT_TYPE_LEFT,
typename INPUT_TYPE_RIGHT,
typename OUTPUT_TYPE>
void FP16Gemv(const INPUT_TYPE_LEFT *m_ptr,
const INPUT_TYPE_RIGHT *v_ptr,
const index_t height,
const index_t width,
OUTPUT_TYPE *result);
#if defined(MACE_ENABLE_FP16_NEON) && defined(__ANDROID__)
template<>
void FP16Gemv<float16_t, float, float>(const float16_t *m_ptr,
const float *v_ptr,
const index_t height,
const index_t width,
float *out_ptr) {
#pragma omp parallel for
for (index_t h = 0; h < height; ++h) {
const float16_t *m_ptr0 = m_ptr + h * width;
const float *v_ptr0 = v_ptr;
float *out_ptr0 = out_ptr + h;
float sum0 = 0;
float32x4_t vm0, vm1, vm2, vm3;
float32x4_t vv0, vv1, vv2, vv3;
float32x4_t vsum0 = vdupq_n_f32(0.f);
float32x4_t vsum1 = vdupq_n_f32(0.f);
float32x4_t vsum2 = vdupq_n_f32(0.f);
float32x4_t vsum3 = vdupq_n_f32(0.f);
index_t w;
for (w = 0; w + 15 < width; w += 16) {
vm0 = vcvt_f32_f16(vld1_f16(m_ptr0));
vv0 = vld1q_f32(v_ptr0);
vm1 = vcvt_f32_f16(vld1_f16(m_ptr0 + 4));
vv1 = vld1q_f32(v_ptr0 + 4);
vm2 = vcvt_f32_f16(vld1_f16(m_ptr0 + 8));
vv2 = vld1q_f32(v_ptr0 + 8);
vm3 = vcvt_f32_f16(vld1_f16(m_ptr0 + 12));
vv3 = vld1q_f32(v_ptr0 + 12);
vsum0 = vmlaq_f32(vsum0, vm0, vv0);
vsum1 = vmlaq_f32(vsum1, vm1, vv1);
vsum2 = vmlaq_f32(vsum2, vm2, vv2);
vsum3 = vmlaq_f32(vsum3, vm3, vv3);
m_ptr0 += 16;
v_ptr0 += 16;
}
for (; w + 7 < width; w += 8) {
vm0 = vcvt_f32_f16(vld1_f16(m_ptr0));
vv0 = vld1q_f32(v_ptr0);
vm1 = vcvt_f32_f16(vld1_f16(m_ptr0 + 4));
vv1 = vld1q_f32(v_ptr0 + 4);
vsum0 = vmlaq_f32(vsum0, vm0, vv0);
vsum1 = vmlaq_f32(vsum1, vm1, vv1);
m_ptr0 += 8;
v_ptr0 += 8;
}
for (; w + 3 < width; w += 4) {
vm0 = vcvt_f32_f16(vld1_f16(m_ptr0));
vv0 = vld1q_f32(v_ptr0);
vsum0 = vmlaq_f32(vsum0, vm0, vv0);
m_ptr0 += 4;
v_ptr0 += 4;
}
vsum0 += vsum1;
vsum2 += vsum3;
vsum0 += vsum2;
sum0 = vaddvq_f32(vsum0);
for (; w < width; ++w) {
sum0 += m_ptr0[0] * v_ptr0[0];
m_ptr0++;
v_ptr0++;
}
*out_ptr0++ = sum0;
}
}
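// The four accumulators (vsum0..vsum3) keep independent dependency chains so
// the NEON multiply-accumulates can pipeline; they are folded into a single
// vector and reduced to a scalar only once per output row, before the scalar
// tail loop handles the remaining width % 4 elements.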
#endif // MACE_ENABLE_FP16_NEON && __ANDROID__
} // namespace ops
} // namespace mace
#endif // MACE_OPS_ARM_FP16_GEMV_H_
|
hist_util.h | /*!
* Copyright 2017 by Contributors
* \file hist_util.h
* \brief Utility for fast histogram aggregation
* \author Philip Cho, Tianqi Chen
*/
#ifndef XGBOOST_COMMON_HIST_UTIL_H_
#define XGBOOST_COMMON_HIST_UTIL_H_
#include <xgboost/data.h>
#include <xgboost/generic_parameters.h>
#include <cstring>
#include <cstdlib>
#include <limits>
#include <vector>
#include <algorithm>
#include <memory>
#include <utility>
#include "row_set.h"
#include "../tree/param.h"
#include "./quantile.h"
#include "./timer.h"
#include "random.h"
namespace xgboost {
/*!
* \brief A C-style array with in-stack allocation. As long as the array is smaller than
* MaxStackSize, it will be allocated inside the stack. Otherwise, it will be
* heap-allocated.
*/
template<typename T, size_t MaxStackSize>
class MemStackAllocator {
public:
explicit MemStackAllocator(size_t required_size): required_size_(required_size) {
}
T* Get() {
if (!ptr_) {
if (MaxStackSize >= required_size_) {
ptr_ = stack_mem_;
} else {
ptr_ = reinterpret_cast<T*>(malloc(required_size_ * sizeof(T)));
do_free_ = true;
}
}
return ptr_;
}
~MemStackAllocator() {
if (do_free_) free(ptr_);
}
private:
T* ptr_ = nullptr;
bool do_free_ = false;
size_t required_size_;
T stack_mem_[MaxStackSize];
};
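// Usage sketch (hypothetical): requests of up to MaxStackSize elements are
// served from the in-object buffer; larger ones fall back to malloc and are
// released by the destructor.
//   MemStackAllocator<float, 128> buf(nthreads);
//   float* tmp = buf.Get(); // stack storage iff nthreads <= 128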
namespace common {
/*
* \brief A thin wrapper around dynamically allocated C-style array.
* Make sure to call resize() before use.
*/
template<typename T>
struct SimpleArray {
~SimpleArray() {
free(ptr_);
ptr_ = nullptr;
}
void resize(size_t n) {
T* ptr = static_cast<T*>(malloc(n * sizeof(T)));
if (ptr_) {
// Copy only what fits: copying n_ elements would overrun the new buffer when shrinking.
memcpy(ptr, ptr_, std::min(n, n_) * sizeof(T));
}
free(ptr_);
ptr_ = ptr;
n_ = n;
}
T& operator[](size_t idx) {
return ptr_[idx];
}
T& operator[](size_t idx) const {
return ptr_[idx];
}
size_t size() const {
return n_;
}
T back() const {
return ptr_[n_-1];
}
T* data() {
return ptr_;
}
const T* data() const {
return ptr_;
}
T* begin() {
return ptr_;
}
const T* begin() const {
return ptr_;
}
T* end() {
return ptr_ + n_;
}
const T* end() const {
return ptr_ + n_;
}
private:
T* ptr_ = nullptr;
size_t n_ = 0;
};
/*!
* \brief A single row in global histogram index.
* Directly represent the global index in the histogram entry.
*/
using GHistIndexRow = Span<uint32_t const>;
// A CSC matrix representing histogram cuts, used in CPU quantile hist.
class HistogramCuts {
// Using friends to avoid creating a virtual class, since HistogramCuts is used as value
// object in many places.
friend class SparseCuts;
friend class DenseCuts;
friend class CutsBuilder;
protected:
using BinIdx = uint32_t;
common::Monitor monitor_;
std::vector<bst_float> cut_values_;
std::vector<uint32_t> cut_ptrs_;
std::vector<float> min_vals_; // storing minimum value in a sketch set.
public:
HistogramCuts();
HistogramCuts(HistogramCuts const& that) = delete;
HistogramCuts(HistogramCuts&& that) noexcept(true) {
*this = std::forward<HistogramCuts&&>(that);
}
HistogramCuts& operator=(HistogramCuts const& that) = delete;
HistogramCuts& operator=(HistogramCuts&& that) noexcept(true) {
monitor_ = std::move(that.monitor_);
cut_ptrs_ = std::move(that.cut_ptrs_);
cut_values_ = std::move(that.cut_values_);
min_vals_ = std::move(that.min_vals_);
return *this;
}
/* \brief Build histogram cuts. */
void Build(DMatrix* dmat, uint32_t const max_num_bins);
/* \brief How many bins a feature has. */
uint32_t FeatureBins(uint32_t feature) const {
return cut_ptrs_.at(feature+1) - cut_ptrs_[feature];
}
// Getters. Cuts should be of no use after building histogram indices, but currently
// it's deeply linked with quantile_hist, gpu sketcher and gpu_hist. So we preserve
// these for now.
std::vector<uint32_t> const& Ptrs() const { return cut_ptrs_; }
std::vector<float> const& Values() const { return cut_values_; }
std::vector<float> const& MinValues() const { return min_vals_; }
size_t TotalBins() const { return cut_ptrs_.back(); }
BinIdx SearchBin(float value, uint32_t column_id) {
auto beg = cut_ptrs_.at(column_id);
auto end = cut_ptrs_.at(column_id + 1);
auto it = std::upper_bound(cut_values_.cbegin() + beg, cut_values_.cbegin() + end, value);
if (it == cut_values_.cend()) {
it = cut_values_.cend() - 1;
}
BinIdx idx = it - cut_values_.cbegin();
return idx;
}
BinIdx SearchBin(Entry const& e) {
return SearchBin(e.fvalue, e.index);
}
};
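// Example (hypothetical cuts): with cut_ptrs_ = {0, 3} and
// cut_values_ = {1.0, 2.5, 4.0} for feature 0, SearchBin(3.0, 0)
// upper-bounds into the third cut and returns bin 2; a value past the last
// cut (e.g. 5.0) is clamped into that same last bin.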
/* \brief An interface for building quantile cuts.
*
* `DenseCuts' always assumes there are `max_bins` for each feature, which makes it
* unsuitable for sparse datasets. On the other hand, `SparseCuts' uses `GetColumnBatches',
* which doubles the memory usage and hence cannot be applied to dense datasets.
*/
class CutsBuilder {
public:
using WXQSketch = common::WXQuantileSketch<bst_float, bst_float>;
protected:
HistogramCuts* p_cuts_;
/* \brief return whether group for ranking is used. */
static bool UseGroup(DMatrix* dmat);
public:
explicit CutsBuilder(HistogramCuts* p_cuts) : p_cuts_{p_cuts} {}
virtual ~CutsBuilder() = default;
static uint32_t SearchGroupIndFromRow(
std::vector<bst_uint> const& group_ptr, size_t const base_rowid) {
using KIt = std::vector<bst_uint>::const_iterator;
KIt res = std::lower_bound(group_ptr.cbegin(), group_ptr.cend() - 1, base_rowid);
// Cannot use CHECK_NE because it will try to print the iterator.
bool const found = res != group_ptr.cend() - 1;
if (!found) {
LOG(FATAL) << "Row " << base_rowid << " does not lie in any group!";
}
uint32_t group_ind = std::distance(group_ptr.cbegin(), res);
return group_ind;
}
void AddCutPoint(WXQSketch::SummaryContainer const& summary) {
if (summary.size > 1 && summary.size <= 16) {
/* specialized code for categorical / ordinal data -- use midpoints */
for (size_t i = 1; i < summary.size; ++i) {
bst_float cpt = (summary.data[i].value + summary.data[i - 1].value) / 2.0f;
if (i == 1 || cpt > p_cuts_->cut_values_.back()) {
p_cuts_->cut_values_.push_back(cpt);
}
}
} else {
for (size_t i = 2; i < summary.size; ++i) {
bst_float cpt = summary.data[i - 1].value;
if (i == 2 || cpt > p_cuts_->cut_values_.back()) {
p_cuts_->cut_values_.push_back(cpt);
}
}
}
}
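// Example (hypothetical summary): for a small sketch {1, 3, 9} (size <= 16)
// the midpoint rule emits cuts {2.0, 6.0}; for larger summaries the raw
// quantile values data[1..size-2] are kept instead, skipping any candidate
// that does not exceed the previous cut.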
/* \brief Build histogram indices. */
virtual void Build(DMatrix* dmat, uint32_t const max_num_bins) = 0;
};
/*! \brief Cut configuration for sparse dataset. */
class SparseCuts : public CutsBuilder {
/*! \brief Distribute columns to each thread according to the number of entries. */
static std::vector<size_t> LoadBalance(SparsePage const& page, size_t const nthreads);
Monitor monitor_;
public:
explicit SparseCuts(HistogramCuts* container) :
CutsBuilder(container) {
monitor_.Init(__FUNCTION__);
}
/*! \brief Concatenate the cuts built in each thread. */
void Concat(std::vector<std::unique_ptr<SparseCuts>> const& cuts, uint32_t n_cols);
/*! \brief Build histogram indices in a single thread. */
void SingleThreadBuild(SparsePage const& page, MetaInfo const& info,
uint32_t max_num_bins,
bool const use_group_ind,
uint32_t beg, uint32_t end, uint32_t thread_id);
void Build(DMatrix* dmat, uint32_t const max_num_bins) override;
};
/*! \brief Cut configuration for dense dataset. */
class DenseCuts : public CutsBuilder {
protected:
Monitor monitor_;
public:
explicit DenseCuts(HistogramCuts* container) :
CutsBuilder(container) {
monitor_.Init(__FUNCTION__);
}
void Init(std::vector<WXQSketch>* sketchs, uint32_t max_num_bins);
void Build(DMatrix* p_fmat, uint32_t max_num_bins) override;
};
// FIXME(trivialfis): Merge this into generic cut builder.
/*! \brief Builds the cut matrix on the GPU.
*
* \return The row stride across the entire dataset.
*/
size_t DeviceSketch(int device,
int max_bin,
int gpu_batch_nrows,
DMatrix* dmat,
HistogramCuts* hmat);
/*!
* \brief preprocessed global index matrix, in CSR format
* Transform floating values to integer index in histogram
* This is a global histogram index.
*/
struct GHistIndexMatrix {
/*! \brief row pointer to rows by element position */
// std::vector<size_t> row_ptr;
SimpleArray<size_t> row_ptr;
/*! \brief The index data */
SimpleArray<uint32_t> index;
/*! \brief hit count of each index */
std::vector<size_t> hit_count;
/*! \brief The corresponding cuts */
HistogramCuts cut;
// Create a global histogram matrix, given cut
void Init(DMatrix* p_fmat, int max_num_bins);
// get i-th row
inline GHistIndexRow operator[](size_t i) const {
return {&index[0] + row_ptr[i],
static_cast<GHistIndexRow::index_type>(
row_ptr[i + 1] - row_ptr[i])};
}
inline void GetFeatureCounts(size_t* counts) const {
auto nfeature = cut.Ptrs().size() - 1;
for (unsigned fid = 0; fid < nfeature; ++fid) {
auto ibegin = cut.Ptrs()[fid];
auto iend = cut.Ptrs()[fid + 1];
for (auto i = ibegin; i < iend; ++i) {
counts[fid] += hit_count[i];
}
}
}
private:
std::vector<size_t> hit_count_tloc_;
};
struct GHistIndexBlock {
const size_t* row_ptr;
const uint32_t* index;
inline GHistIndexBlock(const size_t* row_ptr, const uint32_t* index)
: row_ptr(row_ptr), index(index) {}
// get i-th row
inline GHistIndexRow operator[](size_t i) const {
return {&index[0] + row_ptr[i], row_ptr[i + 1] - row_ptr[i]};
}
};
class ColumnMatrix;
class GHistIndexBlockMatrix {
public:
void Init(const GHistIndexMatrix& gmat,
const ColumnMatrix& colmat,
const tree::TrainParam& param);
inline GHistIndexBlock operator[](size_t i) const {
return {blocks_[i].row_ptr_begin, blocks_[i].index_begin};
}
inline size_t GetNumBlock() const {
return blocks_.size();
}
private:
std::vector<size_t> row_ptr_;
std::vector<uint32_t> index_;
const HistogramCuts* cut_;
struct Block {
const size_t* row_ptr_begin;
const size_t* row_ptr_end;
const uint32_t* index_begin;
const uint32_t* index_end;
};
std::vector<Block> blocks_;
};
/*!
* \brief used instead of GradStats to have float instead of double to reduce histograms
* this improves performance by 10-30% and memory consumption for histograms by 2x
* accuracy in both cases is the same
*/
struct GradStatHist {
typedef float GradType;
/*! \brief sum gradient statistics */
GradType sum_grad;
/*! \brief sum hessian statistics */
GradType sum_hess;
GradStatHist() : sum_grad{0}, sum_hess{0} {
static_assert(sizeof(GradStatHist) == 8,
"Size of GradStatHist is not 8 bytes.");
}
inline void Add(const GradStatHist& b) {
sum_grad += b.sum_grad;
sum_hess += b.sum_hess;
}
inline void Add(const tree::GradStats& b) {
sum_grad += b.sum_grad;
sum_hess += b.sum_hess;
}
inline void Add(const GradientPair& p) {
this->Add(p.GetGrad(), p.GetHess());
}
inline void Add(const GradType& grad, const GradType& hess) {
sum_grad += grad;
sum_hess += hess;
}
inline tree::GradStats ToGradStat() const {
return tree::GradStats(sum_grad, sum_hess);
}
inline void SetSubstract(const GradStatHist& a, const GradStatHist& b) {
sum_grad = a.sum_grad - b.sum_grad;
sum_hess = a.sum_hess - b.sum_hess;
}
inline void SetSubstract(const tree::GradStats& a, const GradStatHist& b) {
sum_grad = a.sum_grad - b.sum_grad;
sum_hess = a.sum_hess - b.sum_hess;
}
inline GradType GetGrad() const { return sum_grad; }
inline GradType GetHess() const { return sum_hess; }
inline static void Reduce(GradStatHist& a, const GradStatHist& b) { // NOLINT(*)
a.Add(b);
}
};
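// Minimal usage sketch (values are illustrative): accumulate gradient pairs
// into a bin, then convert back to the double-precision stats used by the
// tree splitter.
//
// GradStatHist bin;
// bin.Add(GradientPair{0.5f, 1.0f});
// bin.Add(GradientPair{-0.25f, 1.0f});
// tree::GradStats total = bin.ToGradStat(); // sum_grad 0.25, sum_hess 2.0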
using GHistRow = Span<GradStatHist>;
/*!
* \brief histogram of gradient statistics for multiple nodes
*/
class HistCollection {
public:
// access histogram for i-th node
inline GHistRow operator[](bst_uint nid) {
AddHistRow(nid);
return { const_cast<GradStatHist*>(dmlc::BeginPtr(data_arr_[nid])), nbins_};
}
// have we computed a histogram for i-th node?
inline bool RowExists(bst_uint nid) const {
return nid < data_arr_.size();
}
// initialize histogram collection
inline void Init(uint32_t nbins) {
if (nbins_ != nbins) {
data_arr_.clear();
nbins_ = nbins;
}
}
// create an empty histogram for i-th node
inline void AddHistRow(bst_uint nid) {
if (data_arr_.size() <= nid) {
size_t prev = data_arr_.size();
data_arr_.resize(nid + 1);
for (size_t i = prev; i < data_arr_.size(); ++i) {
data_arr_[i].resize(nbins_);
}
}
}
private:
/*! \brief number of all bins over all features */
uint32_t nbins_ = 0;
std::vector<std::vector<GradStatHist>> data_arr_;
};
/*!
* \brief builder for histograms of gradient statistics
*/
class GHistBuilder {
public:
// initialize builder
inline void Init(size_t nthread, uint32_t nbins) {
nthread_ = nthread;
nbins_ = nbins;
}
void BuildBlockHist(const std::vector<GradientPair>& gpair,
const RowSetCollection::Elem row_indices,
const GHistIndexBlockMatrix& gmatb,
GHistRow hist) {
constexpr int kUnroll = 8; // loop unrolling factor
const int32_t nblock = gmatb.GetNumBlock();
const size_t nrows = row_indices.end - row_indices.begin;
const size_t rest = nrows % kUnroll;
#pragma omp parallel for
for (int32_t bid = 0; bid < nblock; ++bid) {
auto gmat = gmatb[bid];
for (size_t i = 0; i < nrows - rest; i += kUnroll) {
size_t rid[kUnroll];
size_t ibegin[kUnroll];
size_t iend[kUnroll];
GradientPair stat[kUnroll];
for (int k = 0; k < kUnroll; ++k) {
rid[k] = row_indices.begin[i + k];
}
for (int k = 0; k < kUnroll; ++k) {
ibegin[k] = gmat.row_ptr[rid[k]];
iend[k] = gmat.row_ptr[rid[k] + 1];
}
for (int k = 0; k < kUnroll; ++k) {
stat[k] = gpair[rid[k]];
}
for (int k = 0; k < kUnroll; ++k) {
for (size_t j = ibegin[k]; j < iend[k]; ++j) {
const uint32_t bin = gmat.index[j];
hist[bin].Add(stat[k]);
}
}
}
for (size_t i = nrows - rest; i < nrows; ++i) {
const size_t rid = row_indices.begin[i];
const size_t ibegin = gmat.row_ptr[rid];
const size_t iend = gmat.row_ptr[rid + 1];
const GradientPair stat = gpair[rid];
for (size_t j = ibegin; j < iend; ++j) {
const uint32_t bin = gmat.index[j];
hist[bin].Add(stat);
}
}
}
}
uint32_t GetNumBins() {
return nbins_;
}
private:
/*! \brief number of threads for parallel computation */
size_t nthread_;
/*! \brief number of all bins over all features */
uint32_t nbins_;
};
void BuildHistLocalDense(size_t istart, size_t iend, size_t nrows, const size_t* rid,
const uint32_t* index, const GradientPair::ValueT* pgh, const size_t* row_ptr,
GradStatHist::GradType* data_local_hist, GradStatHist* grad_stat);
void BuildHistLocalSparse(size_t istart, size_t iend, size_t nrows, const size_t* rid,
const uint32_t* index, const GradientPair::ValueT* pgh, const size_t* row_ptr,
GradStatHist::GradType* data_local_hist, GradStatHist* grad_stat);
void SubtractionTrick(GHistRow self, GHistRow sibling, GHistRow parent);
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_HIST_UTIL_H_
|
GB_unop__erfc_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__erfc_fp32_fp32
// op(A') function: GB_unop_tran__erfc_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = erfcf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = erfcf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = erfcf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ERFC || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__erfc_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = erfcf (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
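// How this kernel is typically reached (a hedged sketch; error checking
// omitted): a user-level GrB_Matrix_apply with the GxB_ERFC_FP32 unary
// operator dispatches here when C and A are both FP32.
//
// GrB_Matrix C, A ;
// // ... create and populate A with FP32 values ...
// GrB_Matrix_new (&C, GrB_FP32, nrows, ncols) ;
// GrB_Matrix_apply (C, NULL, NULL, GxB_ERFC_FP32, A, NULL) ; // C = erfc (A)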
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__erfc_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_sgemm_pack16to8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack16to8_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 64u, 16, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 64u, 16, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 64u, 16, opt.workspace_allocator);
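// Channel layout of tmp (a worked example): with size = 13 the blocks are
// 8 + 4 + 1, so i in [0,8) lands in channel 0 via i/8, i in [8,12) lands in
// channel 1 via i/8 + (i%8)/4, and i == 12 lands in channel 2 via
// i/8 + (i%8)/4 + i%4 -- the same index expressions used throughout below.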
{
int nn_size = size / 8;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x8
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4);
__m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5);
__m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6);
__m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
_mm512_storeu_ps(tmpptr + 16 * 4, _r4);
_mm512_storeu_ps(tmpptr + 16 * 5, _r5);
_mm512_storeu_ps(tmpptr + 16 * 6, _r6);
_mm512_storeu_ps(tmpptr + 16 * 7, _r7);
img0 += size * 16;
tmpptr += 128;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x4
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
img0 += size * 16;
tmpptr += 64;
}
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_load_ps(img0);
_mm512_store_ps(tmpptr, _val);
img0 += size * 16;
tmpptr += 16;
}
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
const float zeros[16] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 8 : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 2);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
__m512 _sum4 = _sum0;
__m512 _sum5 = _sum0;
__m512 _sum6 = _sum0;
__m512 _sum7 = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(kptr);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
__m512 _val4 = _mm512_set1_ps(tmpptr[4]);
__m512 _val5 = _mm512_set1_ps(tmpptr[5]);
__m512 _val6 = _mm512_set1_ps(tmpptr[6]);
__m512 _val7 = _mm512_set1_ps(tmpptr[7]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
tmpptr += 8;
kptr += 16;
}
_mm256_store_ps(outptr0, _mm512_extractf32x8_ps(_sum0, 0));
_mm256_store_ps(outptr0 + 8, _mm512_extractf32x8_ps(_sum1, 0));
_mm256_store_ps(outptr0 + 8 * 2, _mm512_extractf32x8_ps(_sum2, 0));
_mm256_store_ps(outptr0 + 8 * 3, _mm512_extractf32x8_ps(_sum3, 0));
_mm256_store_ps(outptr0 + 8 * 4, _mm512_extractf32x8_ps(_sum4, 0));
_mm256_store_ps(outptr0 + 8 * 5, _mm512_extractf32x8_ps(_sum5, 0));
_mm256_store_ps(outptr0 + 8 * 6, _mm512_extractf32x8_ps(_sum6, 0));
_mm256_store_ps(outptr0 + 8 * 7, _mm512_extractf32x8_ps(_sum7, 0));
_mm256_store_ps(outptr1, _mm512_extractf32x8_ps(_sum0, 1));
_mm256_store_ps(outptr1 + 8, _mm512_extractf32x8_ps(_sum1, 1));
_mm256_store_ps(outptr1 + 8 * 2, _mm512_extractf32x8_ps(_sum2, 1));
_mm256_store_ps(outptr1 + 8 * 3, _mm512_extractf32x8_ps(_sum3, 1));
_mm256_store_ps(outptr1 + 8 * 4, _mm512_extractf32x8_ps(_sum4, 1));
_mm256_store_ps(outptr1 + 8 * 5, _mm512_extractf32x8_ps(_sum5, 1));
_mm256_store_ps(outptr1 + 8 * 6, _mm512_extractf32x8_ps(_sum6, 1));
_mm256_store_ps(outptr1 + 8 * 7, _mm512_extractf32x8_ps(_sum7, 1));
outptr0 += 64;
outptr1 += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 2);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(kptr);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
tmpptr += 4;
kptr += 16;
}
_mm256_store_ps(outptr0, _mm512_extractf32x8_ps(_sum0, 0));
_mm256_store_ps(outptr0 + 8, _mm512_extractf32x8_ps(_sum1, 0));
_mm256_store_ps(outptr0 + 16, _mm512_extractf32x8_ps(_sum2, 0));
_mm256_store_ps(outptr0 + 24, _mm512_extractf32x8_ps(_sum3, 0));
_mm256_store_ps(outptr1, _mm512_extractf32x8_ps(_sum0, 1));
_mm256_store_ps(outptr1 + 8, _mm512_extractf32x8_ps(_sum1, 1));
_mm256_store_ps(outptr1 + 16, _mm512_extractf32x8_ps(_sum2, 1));
_mm256_store_ps(outptr1 + 24, _mm512_extractf32x8_ps(_sum3, 1));
outptr0 += 32;
outptr1 += 32;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 2);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum = _mm512_loadu_ps(biasptr);
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(kptr);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
_sum = _mm512_fmadd_ps(_val0, _w0, _sum);
tmpptr += 1;
kptr += 16;
}
_mm256_store_ps(outptr0, _mm512_extractf32x8_ps(_sum, 0));
_mm256_store_ps(outptr1, _mm512_extractf32x8_ps(_sum, 1));
outptr0 += 8;
outptr1 += 8;
}
}
remain_outch_start += nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 8 : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 8);
const float* kptr = kernel.channel(p / 2 + p % 2);
int nn = inch * maxk * 16; // inch always > 0
__m256 _sum0 = _mm256_loadu_ps(biasptr);
__m256 _sum1 = _sum0;
__m256 _sum2 = _sum0;
__m256 _sum3 = _sum0;
__m256 _sum4 = _sum0;
__m256 _sum5 = _sum0;
__m256 _sum6 = _sum0;
__m256 _sum7 = _sum0;
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(kptr);
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
_sum0 = _mm256_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm256_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_fmadd_ps(_val3, _w0, _sum3);
_sum4 = _mm256_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm256_fmadd_ps(_val5, _w0, _sum5);
_sum6 = _mm256_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm256_fmadd_ps(_val7, _w0, _sum7);
tmpptr += 8;
kptr += 8;
}
_mm256_store_ps(outptr0, _sum0);
_mm256_store_ps(outptr0 + 8, _sum1);
_mm256_store_ps(outptr0 + 8 * 2, _sum2);
_mm256_store_ps(outptr0 + 8 * 3, _sum3);
_mm256_store_ps(outptr0 + 8 * 4, _sum4);
_mm256_store_ps(outptr0 + 8 * 5, _sum5);
_mm256_store_ps(outptr0 + 8 * 6, _sum6);
_mm256_store_ps(outptr0 + 8 * 7, _sum7);
outptr0 += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr = kernel.channel(p / 2 + p % 2);
int nn = inch * maxk * 16; // inch always > 0
__m256 _sum0 = _mm256_loadu_ps(biasptr);
__m256 _sum1 = _sum0;
__m256 _sum2 = _sum0;
__m256 _sum3 = _sum0;
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(kptr);
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
_sum0 = _mm256_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm256_fmadd_ps(_val1, _w0, _sum1);
_sum2 = _mm256_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm256_fmadd_ps(_val3, _w0, _sum3);
tmpptr += 4;
kptr += 8;
}
_mm256_store_ps(outptr0, _sum0);
_mm256_store_ps(outptr0 + 8, _sum1);
_mm256_store_ps(outptr0 + 16, _sum2);
_mm256_store_ps(outptr0 + 24, _sum3);
outptr0 += 32;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const float* kptr = kernel.channel(p / 2 + p % 2);
int nn = inch * maxk * 16; // inch always > 0
__m256 _sum = _mm256_loadu_ps(biasptr);
for (int j = 0; j < nn; j++)
{
__m256 _w0 = _mm256_load_ps(kptr);
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
_sum = _mm256_fmadd_ps(_val0, _w0, _sum);
tmpptr += 1;
kptr += 8;
}
_mm256_store_ps(outptr0, _sum);
outptr0 += 8;
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack16to8_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 16b-16a-maxk-inch/16a-outch/16b
Mat kernel = _kernel.reshape(maxk, inch, outch);
kernel_tm.create(16 * 16 * maxk, inch / 16, outch / 16 + (outch % 16) / 8, (size_t)4u);
int q = 0;
for (; q + 15 < outch; q += 16)
{
float* g00 = kernel_tm.channel(q / 16);
for (int p = 0; p + 15 < inch; p += 16)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < 16; j++)
{
const float* k00 = kernel.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
for (; q + 7 < outch; q += 8)
{
float* g00 = kernel_tm.channel(q / 16 + (q % 16) / 8);
for (int p = 0; p + 15 < inch; p += 16)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < 8; j++)
{
const float* k00 = kernel.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
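// Layout sketch: after this transform, the innermost 16 (or 8) floats of a
// kernel_tm channel hold the weights of every output channel in the block for
// a single (input lane, k) slot, so each _mm512_load_ps / _mm256_load_ps of
// kptr in the GEMM loops above fetches one ready-to-FMA weight vector.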
static void convolution_im2col_sgemm_pack16to8_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);
{
const int gap = (w * stride_h - outw * stride_w) * 16;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
float* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const float* sptr = img.row(dilation_h * u) + dilation_w * v * 16;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m512 _v = _mm512_load_ps(sptr);
_mm512_store_ps(ptr, _v);
sptr += stride_w * 16;
ptr += 16;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack16to8_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
quicksort_hybrid.c | /*
* Recursive hybrid-parallel implementation of Quicksort (not optimized!)
* This code is to be used in conjunction with exercises in module [B1] Hybrid Algorithm
*
* Needs OpenMP 5.0 and GCC 11 to compile (recursive offloading)
*
* @author: Apan Qasem <apan@txstate.edu>
* @date: 04/02/20
*
* @update: 03/13/21
*/
#include<stdlib.h>
#include<stdio.h>
#include<omp.h>
#define VAL_RANGE 1024
#define ELEMENTS_TO_VERIFY 5
void swap(double *x, double *y) {
double tmp;
tmp = (*x);
(*x) = (*y);
(*y) = tmp;
return;
}
/*
* partition array for quicksort
* - move pivot to far right
* - accumulate values smaller than pivot to the left
*/
int partition(double values[], int left, int right, int pivotIndex) {
double pivotValue = values[pivotIndex];
swap(&values[pivotIndex],&values[right]); // Move pivot to end
int storeIndex = left;
for(int i = left; i < right; i++) {
if (values[i] < pivotValue) {
swap(&values[i],&values[storeIndex]);
storeIndex++;
}
}
swap(&values[storeIndex],&values[right]); // Move pivot to its final place
return storeIndex;
}
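/*
 * Worked example of partition(): values = {5, 3, 8, 2}, left = 0, right = 3,
 * pivotIndex = 1 (pivotValue = 3). The pivot is parked at the far right,
 * 2 is swapped into the left region, and the final swap yields {2, 3, 8, 5}
 * with return value 1: smaller values left of the pivot, larger to the right.
 */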
/*
* recursive quicksort
*/
void quickSort(double values[], int left, int right) {
#pragma omp parallel
{
#pragma omp single
{
if (left < right) {
int pivotIndex = (left + right)/2;
int pivotNewIndex = partition(values, left, right, pivotIndex);
#pragma omp target
quickSort(values, left, pivotNewIndex - 1);
#pragma omp task
quickSort(values, pivotNewIndex + 1, right);
}
}
}
return;
}
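/*
 * A CPU-only task variant (a sketch, not the exercise's hybrid version):
 * create the parallel region once at the call site and spawn both recursive
 * calls as tasks.
 *
 * void quickSortTask(double values[], int left, int right) {
 *   if (left < right) {
 *     int pivotNewIndex = partition(values, left, right, (left + right) / 2);
 *     #pragma omp task shared(values)
 *     quickSortTask(values, left, pivotNewIndex - 1);
 *     #pragma omp task shared(values)
 *     quickSortTask(values, pivotNewIndex + 1, right);
 *     #pragma omp taskwait
 *   }
 * }
 *
 * Call it from inside "#pragma omp parallel" + "#pragma omp single".
 */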
/*
* display array contents
*/
void display(double values[], long long N) {
for (int i = 0; i < N; i++)
fprintf(stdout, "%3.4f ", values[i]);
fprintf(stdout, "\n");
}
int main(int argc, char *argv[]) {
if (argc < 3) {
printf("usage: \n");
printf(" ./quicksort N threads\n");
printf(" N = input size\n");
printf(" t = number of OpenMP threads\n");
exit(0);
}
long long N = atoll(argv[1]);
unsigned threads = atoi(argv[2]);
omp_set_num_threads(threads);
double *values = (double *) malloc(sizeof(double) * N);
for (int i = 0; i < N; i++)
values[i] = rand() / (double) (RAND_MAX/VAL_RANGE);
quickSort(values, 0, N - 1);
fprintf(stdout, "Sorted values [0..%d]: ", ELEMENTS_TO_VERIFY - 1);
display(values, ELEMENTS_TO_VERIFY);
return 0;
}
|
GB_unop__identity_int32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_uint32)
// op(A') function: GB (_unop_tran__identity_int32_uint32)
// C type: int32_t
// A type: uint32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
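// (0 here because A is uint32_t and C is int32_t: the operator counts as a
// typecast, so the memcpy fast path below is compiled out and the explicit
// cast loop runs instead.)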
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int32_uint32)
(
int32_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int32_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
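/* Usage sketch (as in main() below): bracket the timed region with
 * gettimeofday() and convert the difference to seconds.
 *
 * struct timeval start, end, result;
 * gettimeofday(&start, 0);
 * // ... timed work ...
 * gettimeofday(&end, 0);
 * timeval_subtract(&result, &end, &start);
 * double seconds = result.tv_sec + result.tv_usec * 1.0e-6;
 */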
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2;
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
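// The update above is a 25-point star stencil: the center point plus four
// neighbors in each of the six axis directions (6*4 + 1 = 25), combined with
// a second-order leapfrog in time: A[(t+1)%2] depends on A[t%2] and on the
// previous value stored in A[(t+1)%2].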
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
LAGraph_cc_fastsv5b.c | //------------------------------------------------------------------------------
// LAGraph_cc_fastsv5b: connected components
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
/**
* Code is based on the algorithm described in the following paper
* Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component
* Algorithm with Fast Convergence (SIAM PP20)
*
* Modified by Tim Davis, Texas A&M University
**/
// The input matrix A must be symmetric. Self-edges (diagonal entries) are
// OK, and are ignored. The values and type of A are ignored; just its
// pattern is accessed.
// The matrix A must have dimension 2^32 or less. If it is larger, use the
// 64-bit version of this method instead. TODO combine the two versions into a
// single user-callable code.
#include "LAGraph.h"
//------------------------------------------------------------------------------
// Reduce_assign32: w (index) += src, using MIN as the "+=" accum operator
//------------------------------------------------------------------------------
// mask = NULL, accumulator = GrB_MIN_UINT32, descriptor = NULL.
// Duplicates are summed with the accumulator, which differs from how
// GrB_assign works. GrB_assign states that the presence of duplicates results
// in undefined behavior. SuiteSparse:GraphBLAS follows the MATLAB rule, which
// discards all but the first of the duplicates. TODO: add this to GraphBLAS
// as a variant of GrB_assign, either as GxB_assign_accum (or another name),
// or as a GxB_* descriptor setting.
#define LAGRAPH_FREE_ALL
// hash table
const int P = 1024;
int *ht_key;
int *ht_val;
#define HASH(x) (((x << 4) + x) & 1023)
#define NEXT(x) ((x + 23) & 1023)
static void ht_malloc ()
{
ht_key = LAGraph_malloc (P, sizeof (int));
ht_val = LAGraph_malloc (P, sizeof (int));
}
static void ht_init ()
{
memset(ht_key, -1, sizeof(int) * P);
memset(ht_val, 0, sizeof(int) * P);
}
static void ht_free ()
{
LAGRAPH_FREE (ht_key) ;
LAGRAPH_FREE (ht_val) ;
}
static void ht_sample (uint32_t *V32, int n, int samples)
{
for (int i = 0; i < samples; i++) {
int x = V32 [rand() % n];
int h = HASH (x);
while (ht_key [h] != -1 && ht_key [h] != x)
h = NEXT (h);
ht_key [h] = x;
ht_val [h] += 1;
}
}
static int ht_most_frequent ()
{
int key = -1, val = 0;
for (int i = 0; i < P; i++)
if (ht_val [i] > val)
{
key = ht_key [i];
val = ht_val [i];
}
return key;
}
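// Usage sketch of the hash-table helpers (as used below): ht_sample() tallies
// a few hundred random entries of the parent array, Reduce_assign32() buffers
// MIN-updates for those frequent parents per thread, and ht_most_frequent()
// picks the likely-largest component for the edge-pruning step.
//
// ht_init () ;
// ht_sample (V32, n, 864) ;
// int key = ht_most_frequent () ; // -1 if nothing was sampled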
static GrB_Info Reduce_assign32
(
GrB_Vector *w_handle, // vector of size n, all entries present
GrB_Vector *s_handle, // vector of size n, all entries present
uint32_t *index, // array of size n
GrB_Index n,
int nthreads
)
{
GrB_Type w_type, s_type ;
GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i ;
uint32_t *w_x, *s_x ;
LAGr_Vector_export (w_handle, &w_type, &w_n, &w_nvals, &w_i,
(void **) &w_x, NULL) ;
LAGr_Vector_export (s_handle, &s_type, &s_n, &s_nvals, &s_i,
(void **) &s_x, NULL) ;
if (nthreads >= 4)
{
uint32_t *mem = LAGraph_malloc (nthreads * P, sizeof (uint32_t));
ht_init () ;
ht_sample (index, n, 864) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int t = 0; t < nthreads; t++)
{
uint32_t *buf = mem + t * P;
for (int h = 0; h < P; h++)
if (ht_key [h] != -1)
buf [h] = w_x [ht_key [h]];
int st = (n * t + nthreads - 1) / nthreads;
int ed = (n * t + n + nthreads - 1) / nthreads;
for (int k = st ; k < ed ; k++)
{
uint32_t i = index [k] ;
int h = HASH(i);
while (ht_key [h] != -1 && ht_key [h] != i)
h = NEXT (h);
if (ht_key [h] == -1)
w_x [i] = LAGRAPH_MIN (w_x [i], s_x [k]);
else
buf [h] = LAGRAPH_MIN (buf [h], s_x [k]);
}
}
for (int h = 0; h < P; h++)
{
int i = ht_key [h];
if (i != -1)
for (int j = 0; j < nthreads; j++)
w_x [i] = LAGRAPH_MIN (w_x [i], mem [j * P + h]);
}
LAGRAPH_FREE (mem);
}
else
{
// sequential version, to avoid atomics
for (GrB_Index k = 0 ; k < n ; k++)
{
uint32_t i = index [k] ;
w_x [i] = LAGRAPH_MIN (w_x [i], s_x [k]) ;
}
}
LAGr_Vector_import (w_handle, w_type, w_n, w_nvals, &w_i,
(void **) &w_x, NULL) ;
LAGr_Vector_import (s_handle, s_type, s_n, s_nvals, &s_i,
(void **) &s_x, NULL) ;
return (GrB_SUCCESS) ;
}
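// Note on the parallel branch above: MIN-updates aimed at the sampled "hot"
// indices go into each thread's private P-entry slice of mem and are merged
// into w_x only after the parallel loop, so the frequent indices never see
// concurrent writes; indices that miss the hash table update w_x directly.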
#undef LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL \
{ \
LAGRAPH_FREE (I) ; \
LAGRAPH_FREE (V32) ; \
LAGr_free (&f) ; \
LAGr_free (&gp) ; \
LAGr_free (&mngp) ; \
LAGr_free (&gp_new) ; \
LAGr_free (&mod) ; \
}
//------------------------------------------------------------------------------
// LAGraph_cc_fastsv5
//------------------------------------------------------------------------------
GrB_Info LAGraph_cc_fastsv5b
(
GrB_Vector *result, // output: array of component identifiers
GrB_Matrix *A, // input matrix
bool sanitize // if true, ensure A is symmetric
)
{
GrB_Info info ;
uint32_t *V32 = NULL ;
GrB_Index n, nnz, *I = NULL ;
GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ;
GrB_Matrix S = NULL, T = NULL ;
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
LAGr_Matrix_nrows (&n, *A) ;
LAGr_Matrix_nvals (&nnz, *A) ;
if (n > UINT32_MAX)
{
LAGRAPH_ERROR ("problem too large; use 64-bit version instead",
GrB_INVALID_VALUE) ;
}
#define FASTSV_SAMPLES 4
GxB_Format_Value format;
LAGRAPH_OK (GxB_get (*A , GxB_FORMAT, &format)) ;
bool sampling = (format == GxB_BY_ROW) && (n * FASTSV_SAMPLES * 2 < nnz);
if (sanitize)
{
// S = A | A'
LAGr_Matrix_new (&S, GrB_BOOL, n, n) ;
LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, *A, *A, LAGraph_desc_otoo) ;
}
else
{
// Use the input as-is, and assume it is symmetric
// LAGr_Matrix_dup (&S, A) ;
S = *A;
}
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
// determine # of threads to use for Reduce_assign
int nthreads = LAGraph_get_nthreads ( ) ;
// # of threads to use for typecast
int nthreads2 = n / (64*1024) ;
nthreads2 = LAGRAPH_MIN (nthreads2, nthreads) ;
nthreads2 = LAGRAPH_MAX (nthreads2, 1) ;
// vectors
LAGr_Vector_new (&f, GrB_UINT32, n) ;
LAGr_Vector_new (&gp_new, GrB_UINT32, n) ;
LAGr_Vector_new (&mod, GrB_BOOL, n) ;
// temporary arrays
I = LAGraph_malloc (n, sizeof (GrB_Index)) ;
V32 = LAGraph_malloc (n, sizeof (uint32_t)) ;
// prepare vectors
#pragma omp parallel for num_threads(nthreads2) schedule(static)
for (GrB_Index i = 0 ; i < n ; i++)
{
I [i] = i ;
V32 [i] = (uint32_t) i ;
}
LAGr_Vector_build (f, I, V32, n, GrB_PLUS_UINT32) ;
LAGr_Vector_dup (&gp, f) ;
LAGr_Vector_dup (&mngp, f) ;
ht_malloc ();
//--------------------------------------------------------------------------
// main computation
//--------------------------------------------------------------------------
if (sampling)
{
GrB_Type type;
GrB_Index nrows, ncols, nvals;
int64_t nonempty;
GrB_Index *Sp, *Sj;
void *Sx;
GxB_Matrix_export_CSR (&S, &type, &nrows, &ncols, &nvals,
&nonempty, &Sp, &Sj, &Sx, NULL);
GrB_Index *Tp = LAGraph_malloc (nrows+1, sizeof (GrB_Index)) ;
GrB_Index *Tj = LAGraph_malloc (nvals, sizeof (GrB_Index)) ;
void *Tx = LAGraph_malloc (nvals, 1) ;
int *range = LAGraph_malloc (nthreads + 1, sizeof (int)) ;
GrB_Index *count = LAGraph_malloc (nthreads + 1, sizeof (GrB_Index)) ;
memset (count, 0, sizeof (GrB_Index) * (nthreads + 1)) ;
for (int i = 0; i <= nthreads; i++)
range [i] = (n * i + nthreads - 1) / nthreads;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int t = 0; t < nthreads; t++)
{
for (int i = range[t]; i < range[t + 1]; i++)
{
int deg = Sp [i + 1] - Sp [i];
count [t + 1] += LAGRAPH_MIN (FASTSV_SAMPLES, deg) ;
}
}
for (int i = 0; i < nthreads; i++)
count [i + 1] += count [i];
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int t = 0; t < nthreads; t++)
{
GrB_Index p = count [t];
Tp [range [t]] = p;
for (int i = range[t]; i < range[t + 1]; i++)
{
for (int j = 0; j < FASTSV_SAMPLES && Sp [i] + j < Sp [i + 1]; j++)
Tj [p++] = Sj [Sp [i] + j];
Tp [i + 1] = p;
}
}
GrB_Index t_nvals = Tp[nrows];
GxB_Matrix_import_CSR (&T, type, nrows, ncols, t_nvals,
-1, &Tp, &Tj, &Tx, NULL);
bool change = true, is_first = true;
while (change)
{
// hooking & shortcutting
LAGr_mxv (mngp, NULL, GrB_MIN_UINT32, GxB_MIN_SECOND_UINT32, T, gp,
NULL) ;
if (!is_first)
LAGRAPH_OK (Reduce_assign32 (&f, &mngp, V32, n, nthreads)) ;
// old:
// LAGr_eWiseMult (f, NULL, NULL, GrB_MIN_UINT32, f, mngp, NULL) ;
// LAGr_eWiseMult (f, NULL, NULL, GrB_MIN_UINT32, f, gp, NULL) ;
// new:
LAGr_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32, mngp, gp,
NULL) ;
// calculate grandparent
LAGr_Vector_extractTuples (NULL, V32, &n, f) ;
#pragma omp parallel for num_threads(nthreads2) schedule(static)
for (uint32_t i = 0 ; i < n ; i++)
{
I [i] = (GrB_Index) V32 [i] ;
}
LAGr_extract (gp_new, NULL, NULL, f, I, n, NULL) ;
// check termination
LAGr_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp, NULL) ;
LAGr_reduce (&change, NULL, GxB_LOR_BOOL_MONOID, mod, NULL) ;
// swap gp and gp_new
GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
is_first = false;
}
ht_init() ;
ht_sample (V32, n, 864) ;
int key = ht_most_frequent() ;
int64_t t_nonempty;
GxB_Matrix_export_CSR (&T, &type, &nrows, &ncols, &t_nvals,
&t_nonempty, &Tp, &Tj, &Tx, NULL);
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int t = 0; t < nthreads; t++)
{
GrB_Index ptr = Sp[range[t]];
for (int v = range[t]; v < range[t + 1]; v++)
{
int pv = V32 [v];
Tp [v] = ptr;
if (pv != key)
{
for (GrB_Index i = Sp [v]; i < Sp [v + 1]; i++)
{
int u = Sj [i];
if (V32 [u] != key)
Tj [ptr++] = u;
}
if (ptr - Tp[v] < Sp [v + 1] - Sp [v])
Tj [ptr++] = key;
}
}
count[t] = ptr - Tp [range [t]];
}
GrB_Index offset = 0;
for (int i = 0; i < nthreads; i++)
{
memcpy(Tj + offset, Tj + Tp [range [i]], sizeof(GrB_Index) * count[i]);
offset += count[i];
count[i] = offset - count[i];
}
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int t = 0; t < nthreads; t++)
{
GrB_Index ptr = Tp [range [t]];
for (int v = range[t]; v < range[t + 1]; v++)
Tp [v] -= ptr - count[t];
}
Tp [n] = offset;
LAGRAPH_FREE (count);
LAGRAPH_FREE (range);
GxB_Matrix_import_CSR (&S, type, nrows, ncols, nvals,
nonempty, &Sp, &Sj, &Sx, NULL);
GxB_Matrix_import_CSR (&T, type, nrows, ncols, offset,
-1, &Tp, &Tj, &Tx, NULL);
}
else
{
T = S;
}
LAGr_Matrix_nvals (&nnz, T);
bool change = true;
while (change && nnz > 0)
{
// hooking & shortcutting
LAGr_mxv (mngp, NULL, GrB_MIN_UINT32, GxB_MIN_SECOND_UINT32, T, gp,
NULL) ;
LAGRAPH_OK (Reduce_assign32 (&f, &mngp, V32, n, nthreads)) ;
// old:
// LAGr_eWiseMult (f, NULL, NULL, GrB_MIN_UINT32, f, mngp, NULL) ;
// LAGr_eWiseMult (f, NULL, NULL, GrB_MIN_UINT32, f, gp, NULL) ;
// new:
LAGr_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32, mngp, gp, NULL);
// calculate grandparent
LAGr_Vector_extractTuples (NULL, V32, &n, f) ;
#pragma omp parallel for num_threads(nthreads2) schedule(static)
for (uint32_t i = 0 ; i < n ; i++)
{
I [i] = (GrB_Index) V32 [i] ;
}
LAGr_extract (gp_new, NULL, NULL, f, I, n, NULL) ;
// check termination
LAGr_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp, NULL) ;
LAGr_reduce (&change, NULL, GxB_LOR_BOOL_MONOID, mod, NULL) ;
// swap gp and gp_new
GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
ht_free () ;
*result = f ;
f = NULL ;
if (!sanitize)
*A = S;
else
LAGr_free (&S) ;
if (sampling)
LAGr_free (&T) ;
LAGRAPH_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: a channel operator.
%
% o value: the value used by the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _PixelChannels
{
double
channel[MaxPixelChannels];
} PixelChannels;
static PixelChannels **DestroyPixelThreadSet(const Image *images,
PixelChannels **pixels)
{
ssize_t
i;
size_t
rows;
assert(pixels != (PixelChannels **) NULL);
rows=MagickMax(GetImageListLength(images),(size_t)
GetMagickResourceLimit(ThreadResource));
for (i=0; i < (ssize_t) rows; i++)
if (pixels[i] != (PixelChannels *) NULL)
pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]);
pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
return(pixels);
}
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
PixelChannels
**pixels;
ssize_t
i;
size_t
columns,
number_images,
rows;
number_images=GetImageListLength(images);
rows=MagickMax(number_images,(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=MagickMax(number_images,MaxPixelChannels);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
{
ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
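/* Note: rows is bounded below by the ThreadResource limit, so the evaluate
loops that consume this set can index it by thread id and give every worker
its own row of per-pixel channel accumulators without locking. */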
static inline double EvaluateMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const PixelChannels
*color_1,
*color_2;
double
distance;
ssize_t
i;
color_1=(const PixelChannels *) x;
color_2=(const PixelChannels *) y;
distance=0.0;
for (i=0; i < MaxPixelChannels; i++)
distance+=color_1->channel[i]-(double) color_2->channel[i];
return(distance < 0.0 ? -1 : distance > 0.0 ? 1 : 0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
const MagickEvaluateOperator op,const double value)
{
double
result;
ssize_t
i;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(double) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a positive
result. It differs from % or fmod() that returns a 'truncated modulus'
result, where floor() is replaced by trunc() and could return a
negative result (which is clipped).
*/
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,GaussianNoise,
value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
value);
break;
}
case InverseLogEvaluateOperator:
{
result=(QuantumRange*pow((value+1.0),QuantumScale*pixel)-1.0)*
PerceptibleReciprocal(value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(double) pixel;
for (i=0; i < (ssize_t) value; i++)
result*=2.0;
break;
}
case LogEvaluateOperator:
{
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(double) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(double) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(double) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
value);
break;
}
case PowEvaluateOperator:
{
if (pixel < 0)
result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
(double) value));
else
result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
(double) value));
break;
}
case RightShiftEvaluateOperator:
{
result=(double) pixel;
for (i=0; i < (ssize_t) value; i++)
result/=2.0;
break;
}
case RootMeanSquareEvaluateOperator:
{
result=((double) pixel*pixel+value);
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(double) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(double) (((double) pixel > value) ? QuantumRange : pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
value);
break;
}
case XorEvaluateOperator:
{
result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
break;
}
}
return(result);
}
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
const Image
*p,
*q;
size_t
columns,
rows;
q=images;
columns=images->columns;
rows=images->rows;
for (p=images; p != (Image *) NULL; p=p->next)
{
if (p->number_channels > q->number_channels)
q=p;
if (p->columns > columns)
columns=p->columns;
if (p->rows > rows)
rows=p->rows;
}
return(CloneImage(q,columns,rows,MagickTrue,exception));
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view,
**image_view;
const Image
*next;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict evaluate_pixels;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
j,
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images);
if (evaluate_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
image_view=(CacheView **) AcquireQuantumMemory(number_images,
sizeof(*image_view));
if (image_view == (CacheView **) NULL)
{
image=DestroyImage(image);
evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
}
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
image_view[j]=AcquireVirtualCacheView(next,exception);
next=GetNextImageInList(next);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Image
*next;
const int
id = GetOpenMPThreadId();
const Quantum
**p;
PixelChannels
*evaluate_pixel;
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
j;
if (status == MagickFalse)
continue;
p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
if (p == (const Quantum **) NULL)
{
status=MagickFalse;
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
images->filename);
continue;
}
for (j=0; j < (ssize_t) number_images; j++)
{
p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
exception);
if (p[j] == (const Quantum *) NULL)
break;
}
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
      if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
        {
          p=(const Quantum **) RelinquishMagickMemory((void *) p);
          status=MagickFalse;
          continue;
        }
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
for (i=0; i < MaxPixelChannels; i++)
evaluate_pixel[j].channel[i]=0.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0))
continue;
evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(next,channel,p[j]),op,
evaluate_pixel[j].channel[i]);
}
p[j]+=GetPixelChannels(next);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0))
continue;
q[i]=ClampToQuantum(evaluate_pixel[number_images/2].channel[i]);
}
q+=GetPixelChannels(image);
}
p=(const Quantum **) RelinquishMagickMemory((void *) p);
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Image
*next;
const int
id = GetOpenMPThreadId();
const Quantum
**p;
ssize_t
i,
x;
PixelChannels
*evaluate_pixel;
Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
if (p == (const Quantum **) NULL)
{
status=MagickFalse;
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
images->filename);
continue;
}
for (j=0; j < (ssize_t) number_images; j++)
{
p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
exception);
if (p[j] == (const Quantum *) NULL)
break;
}
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
      if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
        {
          p=(const Quantum **) RelinquishMagickMemory((void *) p);
          status=MagickFalse;
          continue;
        }
evaluate_pixel=evaluate_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
evaluate_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(next,channel,p[j]),j == 0 ?
AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
}
p[j]+=GetPixelChannels(next);
}
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (op)
{
case MeanEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]/=(double) number_images;
break;
}
case MultiplyEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
evaluate_pixel[x].channel[i]*=QuantumScale;
}
break;
}
case RootMeanSquareEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
number_images);
break;
}
default:
break;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0))
continue;
q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
p=(const Quantum **) RelinquishMagickMemory((void *) p);
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,EvaluateImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
for (j=0; j < (ssize_t) number_images; j++)
image_view[j]=DestroyCacheView(image_view[j]);
image_view=(CacheView **) RelinquishMagickMemory(image_view);
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
clamp=MagickFalse;
artifact=GetImageArtifact(image,"evaluate:clamp");
if (artifact != (const char *) NULL)
clamp=IsStringTrue(artifact);
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
result;
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
q[i]=clamp != MagickFalse ? ClampPixel(result) : ClampToQuantum(result);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies an arithmetic, relational, or logical function to
% each pixel of an image. Use these operations to lighten or darken an image,
% to increase or decrease contrast in an image, or to produce the "negative"
% of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o function: the image function to apply to each pixel channel.
%
%    o number_parameters: the number of function parameters.
%
%    o parameters: one or more function parameters.
%
% o exception: return any errors or warnings in this structure.
%
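%  For example, a minimal sketch that applies a full-range sinusoid with a
%  frequency of 3.0 and a phase of -90 degrees (the "image" and "exception"
%  variables are assumed to already exist):
%
%      double parameters[2] = { 3.0, -90.0 };
%      (void) FunctionImage(image,SinusoidFunction,2,parameters,exception);
%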
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
double
result;
ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
c1*x^2+c2*x+c3).
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel+parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
double
amplitude,
bias,
frequency,
phase;
/*
Sinusoid: frequency, phase, amplitude, bias.
*/
frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
phase=(number_parameters >= 2) ? parameters[1] : 0.0;
amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
break;
}
case ArcsinFunction:
{
double
bias,
center,
range,
width;
/*
      Arcsin (pegged at range limits for invalid results): width, center,
range, and bias.
*/
width=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=2.0*PerceptibleReciprocal(width)*(QuantumScale*pixel-center);
if (result <= -1.0)
result=bias-range/2.0;
else
if (result >= 1.0)
result=bias+range/2.0;
else
result=(double) (range/MagickPI*asin((double) result)+bias);
result*=QuantumRange;
break;
}
case ArctanFunction:
{
double
center,
bias,
range,
slope;
/*
Arctan: slope, center, range, and bias.
*/
slope=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
result=(double) (QuantumRange*(range/MagickPI*atan((double)
result)+bias));
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
const MagickFunction function,const size_t number_parameters,
const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateFunctionImage(image,function,number_parameters,parameters,
exception) != MagickFalse)
return(MagickTrue);
#endif
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
exception);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
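%  For example (a minimal sketch; the "image" and "exception" variables are
%  assumed to already exist):
%
%      double entropy;
%      if (GetImageEntropy(image,&entropy,exception) != MagickFalse)
%        (void) printf("entropy: %g\n",entropy);
%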
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
double *entropy,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*entropy=channel_statistics[CompositePixelChannel].entropy;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
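%  For example (a minimal sketch; the "image" and "exception" variables are
%  assumed to already exist):
%
%      size_t minima, maxima;
%      (void) GetImageExtrema(image,&minima,&maxima,exception);
%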
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
double
max,
min;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageRange(image,&min,&max,exception);
*minima=(size_t) ceil(min-0.5);
*maxima=(size_t) floor(max+0.5);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
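%  For example (a minimal sketch; the "image" and "exception" variables are
%  assumed to already exist):
%
%      double kurtosis, skewness;
%      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
%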
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
double *kurtosis,double *skewness,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*kurtosis=channel_statistics[CompositePixelChannel].kurtosis;
*skewness=channel_statistics[CompositePixelChannel].skewness;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
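%  For example (a minimal sketch; the "image" and "exception" variables are
%  assumed to already exist):
%
%      double mean, standard_deviation;
%      (void) GetImageMean(image,&mean,&standard_deviation,exception);
%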
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
double *standard_deviation,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*mean=channel_statistics[CompositePixelChannel].mean;
*standard_deviation=
channel_statistics[CompositePixelChannel].standard_deviation;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e d i a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMedian() returns the median pixel of one or more image channels.
%
% The format of the GetImageMedian method is:
%
% MagickBooleanType GetImageMedian(const Image *image,double *median,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o median: the median value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
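%  For example (a minimal sketch; the "image" and "exception" variables are
%  assumed to already exist):
%
%      double median;
%      (void) GetImageMedian(image,&median,exception);
%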
*/
MagickExport MagickBooleanType GetImageMedian(const Image *image,double *median,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*median=channel_statistics[CompositePixelChannel].median;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
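%  For example, a minimal sketch that prints the first Hu invariant moment of
%  the red channel (the "image" and "exception" variables are assumed to
%  already exist):
%
%      ChannelMoments *moments = GetImageMoments(image,exception);
%      if (moments != (ChannelMoments *) NULL)
%        {
%          (void) printf("%g\n",moments[RedPixelChannel].invariant[0]);
%          moments=(ChannelMoments *) RelinquishMagickMemory(moments);
%        }
%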
*/
static size_t GetImageChannels(const Image *image)
{
ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
channels++;
}
return((size_t) (channels == 0 ? 1 : channels));
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
CacheView
*image_view;
ChannelMoments
*channel_moments;
double
channels,
M00[MaxPixelChannels+1],
M01[MaxPixelChannels+1],
M02[MaxPixelChannels+1],
M03[MaxPixelChannels+1],
M10[MaxPixelChannels+1],
M11[MaxPixelChannels+1],
M12[MaxPixelChannels+1],
M20[MaxPixelChannels+1],
M21[MaxPixelChannels+1],
M22[MaxPixelChannels+1],
M30[MaxPixelChannels+1];
PointInfo
centroid[MaxPixelChannels+1];
ssize_t
channel,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) memset(channel_moments,0,(MaxPixelChannels+1)*
sizeof(*channel_moments));
(void) memset(centroid,0,sizeof(centroid));
(void) memset(M00,0,sizeof(M00));
(void) memset(M01,0,sizeof(M01));
(void) memset(M02,0,sizeof(M02));
(void) memset(M03,0,sizeof(M03));
(void) memset(M10,0,sizeof(M10));
(void) memset(M11,0,sizeof(M11));
(void) memset(M12,0,sizeof(M12));
(void) memset(M20,0,sizeof(M20));
(void) memset(M21,0,sizeof(M21));
(void) memset(M22,0,sizeof(M22));
(void) memset(M30,0,sizeof(M30));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M00[channel]+=QuantumScale*p[i];
M00[MaxPixelChannels]+=QuantumScale*p[i];
M10[channel]+=x*QuantumScale*p[i];
M10[MaxPixelChannels]+=x*QuantumScale*p[i];
M01[channel]+=y*QuantumScale*p[i];
M01[MaxPixelChannels]+=y*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
centroid[channel].x=M10[channel]*PerceptibleReciprocal(M00[channel]);
centroid[channel].y=M01[channel]*PerceptibleReciprocal(M00[channel]);
}
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
/*
Compute the image moments.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
channels=(double) GetImageChannels(image);
M00[MaxPixelChannels]/=channels;
M01[MaxPixelChannels]/=channels;
M02[MaxPixelChannels]/=channels;
M03[MaxPixelChannels]/=channels;
M10[MaxPixelChannels]/=channels;
M11[MaxPixelChannels]/=channels;
M12[MaxPixelChannels]/=channels;
M20[MaxPixelChannels]/=channels;
M21[MaxPixelChannels]/=channels;
M22[MaxPixelChannels]/=channels;
M30[MaxPixelChannels]/=channels;
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0*
PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+
sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0*
PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])-
sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel])));
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if ((fabs(M20[channel]-M02[channel]) >= MagickEpsilon) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) >= MagickEpsilon)
            {
              if ((M20[channel]-M02[channel]) < 0.0)
                channel_moments[channel].ellipse_angle+=90.0;
              else
                channel_moments[channel].ellipse_angle+=180.0;
            }
        }
      else
        if ((fabs(M20[channel]-M02[channel]) >= MagickEpsilon) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y*
channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal(
channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.x)));
channel_moments[channel].ellipse_intensity=M00[channel]*
PerceptibleReciprocal(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Normalize image moments.
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+1.0)/2.0));
M20[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+0.0)/2.0));
M02[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+2.0)/2.0));
M21[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+1.0)/2.0));
M12[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+2.0)/2.0));
M22[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+2.0)/2.0));
M30[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(3.0+0.0)/2.0));
M03[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+3.0)/2.0));
M00[channel]=1.0;
}
image_view=DestroyCacheView(image_view);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t I m a g e P e r c e p t u a l H a s h                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
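%  For example, a minimal sketch that prints the first hash value of the red
%  channel in the first colorspace (the "image" and "exception" variables are
%  assumed to already exist):
%
%      ChannelPerceptualHash *phash = GetImagePerceptualHash(image,exception);
%      if (phash != (ChannelPerceptualHash *) NULL)
%        {
%          (void) printf("%g\n",phash[RedPixelChannel].phash[0][0]);
%          phash=(ChannelPerceptualHash *) RelinquishMagickMemory(phash);
%        }
%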
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
ExceptionInfo *exception)
{
ChannelPerceptualHash
*perceptual_hash;
char
*colorspaces,
*p,
*q;
const char
*artifact;
MagickBooleanType
status;
ssize_t
i;
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
MaxPixelChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
artifact=GetImageArtifact(image,"phash:colorspaces");
if (artifact != NULL)
colorspaces=AcquireString(artifact);
else
colorspaces=AcquireString("sRGB,HCLp");
perceptual_hash[0].number_colorspaces=0;
perceptual_hash[0].number_channels=0;
q=colorspaces;
for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
{
ChannelMoments
*moments;
Image
*hash_image;
size_t
j;
ssize_t
channel,
colorspace;
if (i >= MaximumNumberOfPerceptualColorspaces)
break;
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
if (colorspace < 0)
break;
perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
break;
hash_image->depth=8;
status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
exception);
    if (status == MagickFalse)
      {
        hash_image=DestroyImage(hash_image);
        break;
      }
moments=GetImageMoments(hash_image,exception);
perceptual_hash[0].number_colorspaces++;
perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
break;
for (channel=0; channel <= MaxPixelChannels; channel++)
for (j=0; j < MaximumNumberOfImageMoments; j++)
perceptual_hash[channel].phash[i][j]=
(-MagickLog10(moments[channel].invariant[j]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
}
colorspaces=DestroyString(colorspaces);
return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
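%  For example (a minimal sketch; the "image" and "exception" variables are
%  assumed to already exist):
%
%      double minima, maxima;
%      (void) GetImageRange(image,&minima,&maxima,exception);
%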
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
double *maxima,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
initialize,
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
initialize=MagickTrue;
*maxima=0.0;
*minima=0.0;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status,initialize) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
row_maxima = 0.0,
row_minima = 0.0;
MagickBooleanType
row_initialize;
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
row_initialize=MagickTrue;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (row_initialize != MagickFalse)
{
row_minima=(double) p[i];
row_maxima=(double) p[i];
row_initialize=MagickFalse;
}
else
{
if ((double) p[i] < row_minima)
row_minima=(double) p[i];
if ((double) p[i] > row_maxima)
row_maxima=(double) p[i];
}
}
p+=GetPixelChannels(image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageRange)
#endif
{
if (initialize != MagickFalse)
{
*minima=row_minima;
*maxima=row_maxima;
initialize=MagickFalse;
}
else
{
if (row_minima < *minima)
*minima=row_minima;
if (row_maxima > *maxima)
*maxima=row_maxima;
}
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
%  Use RelinquishMagickMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
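/*
  GetMedianPixel() selects the median element with quickselect-style
  partitioning; the pixel buffer is partially reordered in place.
*/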
static ssize_t GetMedianPixel(Quantum *pixels,const size_t n)
{
#define SwapPixels(alpha,beta) \
{ \
Quantum gamma=(alpha); \
(alpha)=(beta);(beta)=gamma; \
}
ssize_t
low = 0,
high = (ssize_t) n-1,
median = (low+high)/2;
for ( ; ; )
{
ssize_t
l = low+1,
h = high,
mid = (low+high)/2;
if (high <= low)
return(median);
if (high == (low+1))
{
if (pixels[low] > pixels[high])
SwapPixels(pixels[low],pixels[high]);
return(median);
}
if (pixels[mid] > pixels[high])
SwapPixels(pixels[mid],pixels[high]);
if (pixels[low] > pixels[high])
SwapPixels(pixels[low], pixels[high]);
if (pixels[mid] > pixels[low])
SwapPixels(pixels[mid],pixels[low]);
SwapPixels(pixels[mid],pixels[low+1]);
for ( ; ; )
{
do l++; while (pixels[low] > pixels[l]);
do h--; while (pixels[h] > pixels[low]);
if (h < l)
break;
SwapPixels(pixels[l],pixels[h]);
}
SwapPixels(pixels[low],pixels[h]);
if (h <= median)
low=l;
if (h >= median)
high=h-1;
}
}
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
channels,
*histogram,
standard_deviation;
MagickStatusType
status;
MemoryInfo
*median_info;
Quantum
*median;
QuantumAny
range;
ssize_t
i;
size_t
depth;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
sizeof(*histogram));
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
MaxPixelChannels+1,sizeof(*channel_statistics));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (double *) NULL))
{
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) memset(channel_statistics,0,(MaxPixelChannels+1)*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
if (GetPixelReadMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[channel].depth;
range=GetQuantumRange(depth);
status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[channel].depth++;
if (channel_statistics[channel].depth >
channel_statistics[CompositePixelChannel].depth)
channel_statistics[CompositePixelChannel].depth=
channel_statistics[channel].depth;
i--;
continue;
}
}
if ((double) p[i] < channel_statistics[channel].minima)
channel_statistics[channel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[channel].maxima)
channel_statistics[channel].maxima=(double) p[i];
channel_statistics[channel].sum+=p[i];
channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
p[i];
channel_statistics[channel].area++;
if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
channel_statistics[CompositePixelChannel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
channel_statistics[CompositePixelChannel].maxima=(double) p[i];
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum((double) p[i]))+i]++;
channel_statistics[CompositePixelChannel].sum+=(double) p[i];
channel_statistics[CompositePixelChannel].sum_squared+=(double)
p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_cubed+=(double)
p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
p[i]*p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].area++;
}
p+=GetPixelChannels(image);
}
}
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal(channel_statistics[i].area);
channel_statistics[i].sum*=area;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=channel_statistics[i].sum;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
number_bins;
ssize_t
j;
/*
Compute pixel entropy.
*/
PixelChannel channel = GetPixelChannelChannel(image,i);
number_bins=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
if (histogram[GetPixelChannels(image)*j+i] > 0.0)
number_bins++;
area=PerceptibleReciprocal(channel_statistics[channel].area);
for (j=0; j <= (ssize_t) MaxMap; j++)
{
double
count;
count=area*histogram[GetPixelChannels(image)*j+i];
channel_statistics[channel].entropy+=-count*MagickLog10(count)*
PerceptibleReciprocal(MagickLog10(number_bins));
channel_statistics[CompositePixelChannel].entropy+=-count*
MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
GetPixelChannels(image);
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
median_info=AcquireVirtualMemory(image->columns,image->rows*sizeof(*median));
if (median_info == (MemoryInfo *) NULL)
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
else
{
ssize_t
i;
median=(Quantum *) GetVirtualMemoryBlob(median_info);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
size_t
n = 0;
/*
Compute median statistics for each channel.
*/
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
          median[n++]=p[i];
          p+=GetPixelChannels(image);
        }
}
channel_statistics[channel].median=(double) median[
GetMedianPixel(median,n)];
}
median_info=RelinquishVirtualMemory(median_info);
}
channel_statistics[CompositePixelChannel].mean=0.0;
channel_statistics[CompositePixelChannel].median=0.0;
channel_statistics[CompositePixelChannel].standard_deviation=0.0;
channel_statistics[CompositePixelChannel].entropy=0.0;
for (i=0; i < (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[CompositePixelChannel].mean+=
channel_statistics[i].mean;
channel_statistics[CompositePixelChannel].median+=
channel_statistics[i].median;
channel_statistics[CompositePixelChannel].standard_deviation+=
channel_statistics[i].standard_deviation;
channel_statistics[CompositePixelChannel].entropy+=
channel_statistics[i].entropy;
}
channels=(double) GetImageChannels(image);
channel_statistics[CompositePixelChannel].mean/=channels;
channel_statistics[CompositePixelChannel].median/=channels;
channel_statistics[CompositePixelChannel].standard_deviation/=channels;
channel_statistics[CompositePixelChannel].entropy/=channels;
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
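%  For example, a minimal sketch that averages two images, i.e. computes
%  0.5*image1 + 0.5*image2 (the "images" list holds two frames and the
%  "exception" variable is assumed to already exist):
%
%      double terms[4] = { 0.5, 1.0, 0.5, 1.0 };
%      Image *average = PolynomialImage(images,2,terms,exception);
%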
*/
MagickExport Image *PolynomialImage(const Image *images,
const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict polynomial_pixels;
size_t
number_images;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
polynomial_pixels=AcquirePixelThreadSet(images);
if (polynomial_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Polynomial image pixels.
*/
status=MagickTrue;
progress=0;
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
ssize_t
i,
x;
PixelChannels
*polynomial_pixel;
Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_pixel=polynomial_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
polynomial_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
const Quantum
*p;
if (j >= (ssize_t) number_terms)
continue;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
MagickRealType
coefficient,
degree;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(polynomial_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
coefficient=(MagickRealType) terms[2*j];
degree=(MagickRealType) terms[(j << 1)+1];
polynomial_pixel[x].channel[i]+=coefficient*
pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
}
p+=GetPixelChannels(next);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(images,PolynomialImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
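/*
  Illustrative usage (a sketch): replace each pixel with the median of its
  3x3 neighborhood:

    Image *median = StatisticImage(image, MedianStatistic, 3, 3, exception);

  A width or height of 0 is treated as 1 (see the MagickMax(width,1) calls
  below).
*/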
typedef struct _SkipNode
{
size_t
next[9],
count,
signature;
} SkipNode;
typedef struct _SkipList
{
ssize_t
level;
SkipNode
*nodes;
} SkipList;
typedef struct _PixelList
{
size_t
length,
seed;
SkipList
skip_list;
size_t
signature;
} PixelList;
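/*
  Implementation note: a PixelList is a sparse histogram of 16-bit intensity
  values held in a skip-list.  Node 65536 is a sentinel acting as both head
  and tail, so an empty level simply points back to 65536.  InsertPixelList()
  bumps the count of an existing node or links a new one at a pseudo-random
  level, and the order statistics (median, mode, nonpeak) are read by walking
  level 0 while accumulating counts.  ResetPixelList() avoids clearing all
  65537 nodes by incrementing the signature, which invalidates stale nodes
  lazily.
*/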
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
if (pixel_list == (PixelList *) NULL)
return((PixelList *) NULL);
if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(
pixel_list->skip_list.nodes);
pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
ssize_t
i;
assert(pixel_list != (PixelList **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixel_list[i] != (PixelList *) NULL)
pixel_list[i]=DestroyPixelList(pixel_list[i]);
pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
SkipList
*p;
ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
p=(&pixel_list->skip_list);
p->nodes[color].signature=pixel_list->signature;
p->nodes[color].count=1;
/*
Determine where it belongs in the list.
*/
search=65536UL;
for (level=p->level; level >= 0; level--)
{
while (p->nodes[search].next[level] < color)
search=p->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node.
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
if (level > 8)
level=8;
if (level > (p->level+2))
level=p->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > p->level)
{
p->level++;
update[p->level]=65536UL;
}
/*
Link the node into the skip-list.
*/
do
{
p->nodes[color].next[level]=p->nodes[update[level]].next[level];
p->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
SkipList
*p;
size_t
color;
ssize_t
count;
/*
Find the median value among the colors in the list.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
do
{
color=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
*pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
SkipList
*p;
size_t
color,
max_count,
mode;
ssize_t
count;
/*
Make each pixel the 'predominant color' of the specified neighborhood.
*/
p=(&pixel_list->skip_list);
color=65536L;
mode=color;
max_count=p->nodes[mode].count;
count=0;
do
{
color=p->nodes[color].next[0];
if (p->nodes[color].count > max_count)
{
mode=color;
max_count=p->nodes[mode].count;
}
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
*pixel=ScaleShortToQuantum((unsigned short) mode);
}
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
SkipList
*p;
size_t
color,
next,
previous;
ssize_t
count;
/*
Find the non-peak value among the colors in the list.
*/
p=(&pixel_list->skip_list);
color=65536L;
next=p->nodes[color].next[0];
count=0;
do
{
previous=color;
color=next;
next=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
*pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(pixel);
signature=pixel_list->skip_list.nodes[index].signature;
if (signature == pixel_list->signature)
{
pixel_list->skip_list.nodes[index].count++;
return;
}
AddNodePixelList(pixel_list,index);
}
static void ResetPixelList(PixelList *pixel_list)
{
int
level;
SkipNode
*root;
SkipList
*p;
/*
Reset the skip-list.
*/
p=(&pixel_list->skip_list);
root=p->nodes+65536UL;
p->level=0;
for (level=0; level < 9; level++)
root->next[level]=65536UL;
pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
ssize_t
center,
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,0,0,MagickTrue,exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(statistic_image,DirectClass,exception);
if (status == MagickFalse)
{
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
(MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
(ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
MagickMax(height,1),exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
area,
maximum,
minimum,
sum,
sum_squared;
Quantum
pixel;
const Quantum
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(statistic_traits == UndefinedPixelTrait))
continue;
if (((statistic_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) <= (QuantumRange/2)))
{
SetPixelChannel(statistic_image,channel,p[center+i],q);
continue;
}
if ((statistic_traits & UpdatePixelTrait) == 0)
continue;
pixels=p;
area=0.0;
minimum=pixels[i];
maximum=pixels[i];
sum=0.0;
sum_squared=0.0;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) MagickMax(height,1); v++)
{
for (u=0; u < (ssize_t) MagickMax(width,1); u++)
{
if ((type == MedianStatistic) || (type == ModeStatistic) ||
(type == NonpeakStatistic))
{
InsertPixelList(pixels[i],pixel_list[id]);
pixels+=GetPixelChannels(image);
continue;
}
area++;
if (pixels[i] < minimum)
minimum=(double) pixels[i];
if (pixels[i] > maximum)
maximum=(double) pixels[i];
sum+=(double) pixels[i];
sum_squared+=(double) pixels[i]*pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
switch (type)
{
case ContrastStatistic:
{
pixel=ClampToQuantum(MagickAbsoluteValue((maximum-minimum)*
PerceptibleReciprocal(maximum+minimum)));
break;
}
case GradientStatistic:
{
pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
break;
}
case MaximumStatistic:
{
pixel=ClampToQuantum(maximum);
break;
}
case MeanStatistic:
default:
{
pixel=ClampToQuantum(sum/area);
break;
}
case MedianStatistic:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
pixel=ClampToQuantum(minimum);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
pixel=ClampToQuantum(sqrt(sum_squared/area));
break;
}
case StandardDeviationStatistic:
{
pixel=ClampToQuantum(sqrt(sum_squared/area-(sum/area*sum/area)));
break;
}
}
SetPixelChannel(statistic_image,channel,pixel,q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(statistic_image);
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
|
matadd.c | #include "matrix.h"
/** \brief Adds two matrices
*
* \param[in] A First input matrix
* \param[in] B Second input matrix
* \param[in] result Matrix to store the result
* \return \f$ \mathbf{A}+\mathbf{B} \f$
*
*/
MATRIX mat_add(MATRIX A, MATRIX B, MATRIX result)
{
int i, j, m, n, o, p;
m = MatCol(A);
n = MatRow(A);
o = MatCol(B);
p = MatRow(B);
if(result==NULL) if((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED))==NULL)
return mat_error(MAT_MALLOC);
if(o==m && p==n)
{
#pragma omp parallel for private(j)
for(i=0; i<n; ++i)
{
for(j=0; j<m; ++j)
{
result[i][j] = A[i][j]+B[i][j];
}
}
}
else if(o==1 && p!=1)
{
#pragma omp parallel for private(j)
for(i=0; i<n; ++i)
{
for(j=0; j<m; ++j)
{
result[i][j] = A[i][j]+B[i][0];
}
}
}
else if(p==1 && o!=1)
{
#pragma omp parallel for private(j)
for(i=0; i<n; ++i)
{
for(j=0; j<m; ++j)
{
result[i][j] = A[i][j]+B[0][j];
}
}
}
else gen_error(GEN_SIZEMISMATCH);
return result;
}
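/* Illustrative usage (a sketch; values would be filled in by the caller):
 *
 *     MATRIX A = mat_creat(3, 4, UNDEFINED);   // 3 x 4
 *     MATRIX b = mat_creat(3, 1, UNDEFINED);   // 3 x 1 column vector
 *     MATRIX C = mat_add(A, b, NULL);          // b broadcast across columns
 *
 * A single-column B (o==1) is added to every column of A, and a single-row
 * B (p==1) to every row, mirroring the broadcasting branches above.
 */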
/** \brief Adds a scalar to a matrix
*
* \param[in] A Input matrix
* \param[in] s Input scalar
* \param[in] result Matrix to store the result
* \return \f$ \mathbf{A}+s\mathbf{11}^T \f$
*
*/
MATRIX mat_adds(MATRIX A, mtype s, MATRIX result)
{
int i, j, m, n;
m = MatCol(A);
n = MatRow(A);
if(result==NULL) if((result = mat_creat( MatRow(A), MatCol(A), UNDEFINED))==NULL)
return mat_error(MAT_MALLOC);
#pragma omp parallel for private(j)
for(i=0; i<n; ++i)
{
for(j=0; j<m; ++j)
{
result[i][j] = A[i][j]+s;
}
}
return result;
}
/** \brief Adds two integer vectors
*
* \param[in] A Input vector
* \param[in] B Input vector
* \param[in] result Vector to store the result
* \return \f$ \mathbf{A}+\mathbf{B} \f$
*
*/
INT_VECTOR int_vec_add(INT_VECTOR A, INT_VECTOR B, INT_VECTOR result)
{
int i, m;
m = Int_VecLen(A);
if(result==NULL) if((result = int_vec_creat(m, UNDEFINED))==NULL)
return int_vec_error(INT_VEC_MALLOC);
if(m!=Int_VecLen(B))gen_error(GEN_SIZEMISMATCH);
for(i=0; i<m; ++i) result[i] = A[i]+B[i];
return result;
}
/** \brief Adds an integer to an integer vector
*
* \param[in] A Input vector
* \param[in] s Input scalar
* \param[in] result Vector to store the result
* \return \f$ \mathbf{A}+s\mathbf{1} \f$
*
*/
INT_VECTOR int_vec_adds(INT_VECTOR A, int s, INT_VECTOR result)
{
int i, m;
m = Int_VecLen(A);
if(result==NULL) if((result = int_vec_creat(m, UNDEFINED))==NULL)
return int_vec_error(INT_VEC_MALLOC);
for(i=0; i<m; ++i) result[i] = A[i]+s;
return result;
}
|
gsrb.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
//------------------------------------------------------------------------------------------------------------------------------
// better solution would be to adapt the box size as the problem shrinks...
// i.e. fix unit stride at 4KB and calculate BlockJ = ((STANZA+dim.i-1)/dim.i)
// similarly, fix BlockK to get some reuse and have enough tasks...
//------------------------------------------------------------------------------------------------------------------------------
// Kludge for now...
#define BlockJ 16
#define BlockK 4
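// Worked example of the adaptive choice suggested above, assuming a
// hypothetical STANZA of 512 doubles (4KB of unit stride): for dim.i = 32,
// BlockJ = ((512+32-1)/32) = 16 pencils per cache block, which happens to
// match the fixed BlockJ above.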
//------------------------------------------------------------------------------------------------------------------------------
void __box_smooth_GSRB_multiple(box_type *box, int phi_id, int rhs_id, double a, double b, int s){
int jj,kk;
int pencil = box->pencil;
int plane = box->plane;
int ghosts = box->ghosts;
double h2inv = 1.0/(box->h*box->h);
double * __restrict__ phi = box->grids[ phi_id] + ghosts*plane + ghosts*pencil + ghosts; // i.e. [0] = first non ghost zone point
double * __restrict__ phi_new= box->grids[ phi_id] + ghosts*plane + ghosts*pencil + ghosts;
double * __restrict__ rhs = box->grids[ rhs_id] + ghosts*plane + ghosts*pencil + ghosts;
double * __restrict__ alpha = box->grids[__alpha ] + ghosts*plane + ghosts*pencil + ghosts;
double * __restrict__ beta_i = box->grids[__beta_i] + ghosts*plane + ghosts*pencil + ghosts;
double * __restrict__ beta_j = box->grids[__beta_j] + ghosts*plane + ghosts*pencil + ghosts;
double * __restrict__ beta_k = box->grids[__beta_k] + ghosts*plane + ghosts*pencil + ghosts;
double * __restrict__ lambda = box->grids[__lambda] + ghosts*plane + ghosts*pencil + ghosts;
int ghostsToOperateOn=ghosts-1;
int ss;
int big_box=0;
// don't subdivide small boxes into tasks (too much overhead from omp task...)
if(box->dim.k>8)big_box=1;
if(box->dim.j>8)big_box=1;
// do ghosts iterations on this list of tasks...
for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){
// iterate through all cache blocks within this box and queue a task...
for(kk=0-ghostsToOperateOn;kk<box->dim.k+ghostsToOperateOn;kk+=BlockK){
for(jj=0-ghostsToOperateOn;jj<box->dim.j+ghostsToOperateOn;jj+=BlockJ){
#pragma omp task if(big_box)
{
int i,j,k;
int highK,highJ;
highK = MIN(kk+BlockK,box->dim.k+ghostsToOperateOn);
highJ = MIN(jj+BlockJ,box->dim.j+ghostsToOperateOn);
#if defined(__GSRB_CONDITIONAL)
#warning GSRB on every point with conditional assignment for Red-Black
for(k=kk;k<highK;k++){
for(j=jj;j<highJ;j++){
#pragma simd always
for(i=0-ghostsToOperateOn;i<box->dim.i+ghostsToOperateOn;i++){
int ijk = i + j*pencil + k*plane;
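// red-black selection: doit is 1 exactly when i+j+k+ss is even (the XOR of
// the low bits computes the parity of i+j+k+ss+1 without a divide), so each
// sweep ss updates the opposite color of the previous one.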
int doit = ((i^(j^k^ss^1))&1);
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
//double delta = doit ? lambda[ijk]*(helmholtz-rhs[ijk]) : 0.0;
//phi_new[ijk] = phi[ijk] - delta;
phi_new[ijk] = doit ? phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]) : phi[ijk];
}}}
#elif defined(__GSRB_STRIDE2)
#warning GSRB using stride-2 accesses
for(k=kk;k<highK;k++){
for(j=jj;j<highJ;j++){
for(i=((j^k^ss^1)&1)+1-ghosts;i<box->dim.i+ghostsToOperateOn;i+=2){ // stride-2 GSRB
int ijk = i + j*pencil + k*plane;
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
phi_new[ijk] = phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]);
}}}
#elif defined(__GSRB_FP)
#warning GSRB using pre-computed 1.0/0.0 FP array for Red-Black
for(k=kk;k<highK;k++){int EvenOdd = (k^ss)&1;
for(j=jj;j<highJ;j++){
for(i=0-ghostsToOperateOn;i<box->dim.i+ghostsToOperateOn;i++){
int ij = i + j*pencil;
int ijk = i + j*pencil + k*plane;
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
phi_new[ijk] = phi[ijk] - RedBlack[EvenOdd][ij]*lambda[ijk]*(helmholtz-rhs[ijk]); // compiler seems to get confused unless there are disjoint read/write pointers
}}}
#else
#warning GSRB using if-then-else on loop indices for Red-Black
for(k=kk;k<highK;k++){
for(j=jj;j<highJ;j++){
for(i=0-ghostsToOperateOn;i<box->dim.i+ghostsToOperateOn;i++){
if((i^j^k^ss^1)&1){ // looks very clean when [0] is i,j,k=0,0,0
int ijk = i + j*pencil + k*plane;
double helmholtz = a*alpha[ijk]*phi[ijk]
-b*h2inv*(
beta_i[ijk+1 ]*( phi[ijk+1 ]-phi[ijk ] )
-beta_i[ijk ]*( phi[ijk ]-phi[ijk-1 ] )
+beta_j[ijk+pencil]*( phi[ijk+pencil]-phi[ijk ] )
-beta_j[ijk ]*( phi[ijk ]-phi[ijk-pencil] )
+beta_k[ijk+plane ]*( phi[ijk+plane ]-phi[ijk ] )
-beta_k[ijk ]*( phi[ijk ]-phi[ijk-plane ] )
);
phi_new[ijk] = phi[ijk] - lambda[ijk]*(helmholtz-rhs[ijk]);
}}}}
#endif
}}}
// If communication-avoiding, dependent tasks cannot get too far ahead of each other.
// As I have no idea how to perform p2p synchronization among omp tasks, I'll just barrier...
if(ghostsToOperateOn>0){
#pragma omp taskwait
}
} // ss
}
//------------------------------------------------------------------------------------------------------------------------------
void smooth(domain_type * domain, int level, int phi_id, int rhs_id, double a, double b){
int box,s;
int ghosts = domain->ghosts;
// if communication-avoiding, need RHS for stencils in ghost zones
if(ghosts>1)exchange_boundary(domain,level,rhs_id,1,1,1);
for(s=0;s<numSmooths;s+=ghosts){
exchange_boundary(domain,level,phi_id,1,ghosts>1,ghosts>1); // corners/edges if doing communication-avoiding...
uint64_t _timeStart = CycleTime();
#pragma omp parallel
{
int box;
#pragma omp for private(box) nowait // <<< needs to be omp for rather than single in order to get enough task injection. <<< needs to be no wait to ensure idle cores can grab tasks asap
for(box=0;box<domain->subdomains_per_rank;box++){
__box_smooth_GSRB_multiple(&domain->subdomains[box].levels[level],phi_id,rhs_id,a,b,s);
}
}
domain->cycles.smooth[level] += (uint64_t)(CycleTime()-_timeStart);
}
}
|
fc_kernel_int8_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
#include "fc_kernel_int8_arm.h"
void gemv_1x8_int8(int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size,
int8_t *output) {
int8x8_t input;
int8x16_t weight_0_1, weight_2_3, weight_4_5, weight_6_7;
int16x8_t weight0_16, weight1_16, weight2_16, weight3_16;
int16x8_t weight4_16, weight5_16, weight6_16, weight7_16;
int32x4_t res = {0, 0, 0, 0};
int32x4_t res1 = {0, 0, 0, 0};
int8_t *input_ptr = inp;
int8_t *weight_ptr = kernel;
int remainw = (kernel_size >> 3) << 3;
for (int i = 0; i < remainw; i = i + 8) {
input = vld1_s8(input_ptr);
weight_0_1 = vld1q_s8(weight_ptr);
weight_2_3 = vld1q_s8(weight_ptr + 16);
weight_4_5 = vld1q_s8(weight_ptr + 32);
weight_6_7 = vld1q_s8(weight_ptr + 48);
weight0_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 0)), vget_low_s8(weight_0_1));
weight1_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 1)), vget_high_s8(weight_0_1));
weight2_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 2)), vget_low_s8(weight_2_3));
weight3_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 3)), vget_high_s8(weight_2_3));
weight4_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 4)), vget_low_s8(weight_4_5));
weight5_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 5)), vget_high_s8(weight_4_5));
weight6_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 6)), vget_low_s8(weight_6_7));
weight7_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 7)), vget_high_s8(weight_6_7));
res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight0_16), vget_low_s16(weight1_16)));
res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight2_16), vget_low_s16(weight3_16)));
res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight4_16), vget_low_s16(weight5_16)));
res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight6_16), vget_low_s16(weight7_16)));
res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight0_16), vget_high_s16(weight1_16)));
res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight2_16), vget_high_s16(weight3_16)));
res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight4_16), vget_high_s16(weight5_16)));
res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight6_16), vget_high_s16(weight7_16)));
input_ptr += 8;
weight_ptr += 64;
}
for (int i = remainw; i < kernel_size; ++i) {
weight0_16 = vmull_s8(vdup_n_s8(input_ptr[0]), vld1_s8(weight_ptr));
res = vaddq_s32(vmovl_s16(vget_low_s16(weight0_16)), res);
res1 = vaddq_s32(vmovl_s16(vget_high_s16(weight0_16)), res1);
input_ptr += 1;
weight_ptr += 8;
}
if (biases) {
int32x4_t bias = vld1q_s32(biases);
int32x4_t bias1 = vld1q_s32(biases + 4);
res = vaddq_s32(res,bias);
res1 = vaddq_s32(res1,bias1);
}
float32x4_t res_f = vcvtq_f32_s32(res);
float32x4_t res1_f = vcvtq_f32_s32(res1);
float32x4_t scale = vld1q_f32(scales);
float32x4_t scale_1 = vld1q_f32(scales + 4);
res_f = vmulq_f32(res_f, scale);
res1_f = vmulq_f32(res1_f, scale_1);
/* round to nearest: offset by +0.5 for non-negative lanes and -0.5 for
   negative lanes before the truncating convert */
float32x4_t half = vdupq_n_f32(0.5f);
res_f = vbslq_f32(vcltq_f32(res_f, vdupq_n_f32(0.0f)), vsubq_f32(res_f, half), vaddq_f32(res_f, half));
res1_f = vbslq_f32(vcltq_f32(res1_f, vdupq_n_f32(0.0f)), vsubq_f32(res1_f, half), vaddq_f32(res1_f, half));
res = vcvtq_s32_f32(res_f);
res1 = vcvtq_s32_f32(res1_f);
int16x4_t res_16 = vmovn_s32(res);
int16x4_t res1_16 = vmovn_s32(res1);
int8x8_t result = vmovn_s16(vcombine_s16(res_16, res1_16));
int8x8_t _m127 = vdup_n_s8(127);
int8x8_t _m_127 = vdup_n_s8(-127);
result = vmax_s8(_m_127, result);
result = vmin_s8(_m127, result);
vst1_s8(output, result);
}
void gemv_1x2_int8(const int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size,
int8_t *output) {
int8_t *input_ptr = inp;
int8_t *weight_ptr = kernel;
int remainw = (kernel_size >> 3) << 3; /* round down to a multiple of 8 */
int8x8x2_t weight;
int8x8_t input;
int16x8_t out_16_0, out_16_1;
int32x4_t out_32_0, out_32_1; /* vpaddlq_s16 widens to 32-bit lanes */
int32_t sum0 = 0, sum1 = 0;
for (int i = 0; i < remainw; i = i + 8) {
weight = vld2_s8(weight_ptr);
input = vld1_s8(input_ptr);
out_16_0 = vmull_s8(weight.val[0], input);
out_16_1 = vmull_s8(weight.val[1], input);
out_32_0 = vpaddlq_s16(out_16_0);
out_32_1 = vpaddlq_s16(out_16_1);
sum0 += vgetq_lane_s32(out_32_0, 0) + vgetq_lane_s32(out_32_0, 1) +
vgetq_lane_s32(out_32_0, 2) + vgetq_lane_s32(out_32_0, 3);
sum1 += vgetq_lane_s32(out_32_1, 0) + vgetq_lane_s32(out_32_1, 1) +
vgetq_lane_s32(out_32_1, 2) + vgetq_lane_s32(out_32_1, 3);
weight_ptr += 16;
input_ptr += 8;
}
for (int i = remainw; i < kernel_size; ++i) {
sum0 += weight_ptr[0] * input_ptr[0];
sum1 += weight_ptr[1] * input_ptr[0];
input_ptr++;
weight_ptr += 2;
}
if (biases) {
sum0 += biases[0];
sum1 += biases[1];
}
int data_i32_0 = round(sum0 * scales[0]);
if (data_i32_0 > 127)
data_i32_0 = 127;
else if (data_i32_0 < -127)
data_i32_0 = -127;
int data_i32_1 = round(sum1 * scales[1]);
if (data_i32_1 > 127)
data_i32_1 = 127;
else if (data_i32_1 < -127)
data_i32_1 = -127;
output[0] = data_i32_0;
output[1] = data_i32_1;
}
// start and end channel must be 8 aligned
void gemv1x8(const int8_t *input, int8_t *output, int8_t *weight_interleaved,
const int32_t *biases, const float *scales,
int kernel_size, int start_channel, int end_channel, int num_thread,
int cpu_affinity) {
int ch = 0;
int8_t *cur_kernel, *cur_result;
int32_t *cur_biases;
const float *cur_scales;
// #pragma omp parallel for num_threads(num_thread)
for (ch = start_channel; ch < end_channel; ch += 8) {
cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
cur_result = (int8_t *) (output + ch);
cur_biases = biases ? (int32_t *) (biases + ch) : NULL;
cur_scales = scales + ch;
gemv_1x8_int8(cur_biases, cur_scales, (int8_t *) input, cur_kernel, kernel_size,
cur_result);
}
}
// start channel must be 2 aligned
void gemv1x2(const int8_t *input, int8_t *output, int8_t *weight_interleaved,
const int32_t *biases, const float *scales,
int kernel_size,int start_channel,int end_channel,int num_thread,int cpu_affinity)
{
int32_t sum;
int ch = 0;
int8_t *cur_kernel;
int32_t *cur_biases;
int8_t *cur_result;
const float* cur_scales;
for (ch = start_channel; ch < (end_channel & -2); ch += 2) {
cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
cur_result = (int8_t *) (output + ch);
cur_biases = biases ? (int32_t *) (biases + ch) : NULL;
cur_scales = scales + ch;
gemv_1x2_int8(cur_biases, cur_scales, (int8_t*) input, cur_kernel, kernel_size, cur_result);
}
if (end_channel & 0x1) {
cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch);
cur_result = (int8_t *) (output + ch);
cur_scales = scales + ch; /* must be rebased for the last channel */
sum = biases ? *(biases + ch) : 0;
for (int j = 0; j < kernel_size; j++)
sum = sum + input[j] * cur_kernel[j];
int data_i32_0 = round(sum * cur_scales[0]);
if (data_i32_0 > 127)
data_i32_0 = 127;
else if (data_i32_0 < -127)
data_i32_0 = -127;
*cur_result = data_i32_0;
}
}
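/*
 * Layout produced by interleave_kernel() below (illustrative): for the 1x8
 * path, the weights of 8 consecutive output channels c0..c7 are stored
 * contiguously per input index k,
 *
 *   [c0 k0][c1 k0]...[c7 k0][c0 k1]...[c7 k1]... ,
 *
 * so gemv_1x8_int8() can consume 64 bytes (8 inputs x 8 channels) per
 * iteration; the 1x2 path interleaves channel pairs the same way.
 */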
static void interleave_kernel(const int8_t *kernel, int8_t *kernel_interleaved, int out_chan, int kernel_size) {
int i, j, k;
int8_t *cur_kernel[8];
int8_t *cur_kernel_interleaved;
// interleave 8 kernel
for (i = 0; i < (out_chan & -8); i += 8) {
for (j = 0; j < 8; j++)
cur_kernel[j] = (int8_t *) kernel + kernel_size * (i + j);
cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
for (j = 0; j < 8; j++)
cur_kernel_interleaved[8 * k + j] = *(cur_kernel[j] + k);
}
// interleave 2 kernel
for (; i < (out_chan & -2); i += 2) {
for (j = 0; j < 2; j++)
cur_kernel[j] = (int8_t *) kernel + kernel_size * (i + j);
cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
for (j = 0; j < 2; j++)
cur_kernel_interleaved[2 * k + j] = *(cur_kernel[j] + k);
}
// copy last kernel
if (out_chan & 0x1) {
cur_kernel[0] = (int8_t *) kernel + kernel_size * i;
cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i;
for (k = 0; k < kernel_size; k++)
cur_kernel_interleaved[k] = *(cur_kernel[0] + k);
}
return;
}
int int8_fc_kernel_prerun(struct ir_tensor *input_tensor, \
struct ir_tensor *filter_tensor, \
struct ir_tensor *output_tensor, \
struct fc_priv_info *priv_info, \
struct fc_param *param) {
int num_output = param->num_output;
int kernel_size = filter_tensor->dims[1];
int kernel_align = ((kernel_size + 1) & -2);
if (!priv_info->interleave_buffer) {
int mem_size = num_output * kernel_align;
void *mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
if (!priv_info->input_buffer) {
int mem_size = kernel_align;
void *mem = sys_malloc(mem_size);
priv_info->input_buffer = mem;
priv_info->input_buffer_size = mem_size;
}
int8_t *filter_data = (int8_t *) filter_tensor->data;
interleave_kernel(filter_data, (int8_t *) priv_info->interleave_buffer, num_output,
kernel_size);
return 0;
}
int int8_fc_kernel_run(struct ir_tensor *input_tensor, \
struct ir_tensor *filter_tensor, \
struct ir_tensor *bias_tensor, \
struct ir_tensor *output_tensor, \
struct fc_priv_info *priv_info, \
struct fc_param *param, \
int num_thread, int cpu_affinity) {
int out_num = param->num_output;
int kernel_size = filter_tensor->dims[1];
int8_t *input = (int8_t *) input_tensor->data;
int8_t *output = (int8_t *) output_tensor->data;
int8_t *weight = (int8_t *) priv_info->interleave_buffer;
int32_t *biases = NULL;
if (bias_tensor)
biases = (int32_t *) bias_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
float *weight_scales = filter_tensor->scale_list;
float *requant_scales = (float *) malloc(out_num * sizeof(float));
for (int i = 0; i < out_num; i++)
requant_scales[i] = (input_scale * weight_scales[i]) / output_scale;
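// Requantization: the int32 accumulator is in units of
// input_scale*weight_scale, so multiplying by
// input_scale*weight_scale/output_scale lands it on the output's int8
// grid in a single step.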
int out_num_8 = out_num & ~7;
for (int i = 0; i < input_tensor->dims[0]; i++) {
int8_t *cur_input = input + i * kernel_size;
int8_t *cur_output = output + i * out_num;
gemv1x8(cur_input, cur_output, weight, biases, requant_scales, kernel_size, 0, out_num_8, num_thread, cpu_affinity);
if (out_num & 0x7)
gemv1x2(cur_input, cur_output, weight, biases, requant_scales, kernel_size, out_num_8,out_num,num_thread, cpu_affinity);
}
free(requant_scales);
return 0;
}
|
GB_unaryop__ainv_int32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_int8
// op(A') function: GB_tran__ainv_int32_int8
// C type: int32_t
// A type: int8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
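// For example, GB_CAST_OP (p, p) expands (after macro substitution) to:
// { int8_t aij = Ax [p] ; int32_t x = (int32_t) aij ; Cx [p] = -x ; }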
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int32_int8
(
int32_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_axpy.c | #include "rex_kmp.h"
char OUT__3__9500__axpy_ompacc__67__id__ = 0;
struct __tgt_offload_entry OUT__3__9500__axpy_ompacc__67__omp_offload_entry__ __attribute__((section("omp_offloading_entries"))) = {((void *)(&OUT__3__9500__axpy_ompacc__67__id__)), "OUT__3__9500__axpy_ompacc__67__kernel__", 0, 0, 0};
char OUT__2__9500__axpy_ompacc__70__id__ = 0;
struct __tgt_offload_entry OUT__2__9500__axpy_ompacc__70__omp_offload_entry__ __attribute__((section("omp_offloading_entries"))) = {((void *)(&OUT__2__9500__axpy_ompacc__70__id__)), "OUT__2__9500__axpy_ompacc__70__kernel__", 0, 0, 0};
char OUT__1__9500__axpy_ompacc__75__id__ = 0;
struct __tgt_offload_entry OUT__1__9500__axpy_ompacc__75__omp_offload_entry__ __attribute__((section("omp_offloading_entries"))) = {((void *)(&OUT__1__9500__axpy_ompacc__75__id__)), "OUT__1__9500__axpy_ompacc__75__kernel__", 0, 0, 0};
// Experimental test input for Accelerator directives
// simplest scalar*vector operations
// Liao 1/15/2013
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
#define NUM_RUNS 10
double read_timer_ms()
{
struct timeb tm;
ftime(&tm);
return ((double )tm . time) * 1000.0 + ((double )tm . millitm);
}
/* change this to do saxpy or daxpy : single precision or double precision*/
#define REAL double
#define VEC_LEN 1024000 //use a fixed number for now
/* zero out the entire vector */
void zero(double *A,int n)
{
int i;
for (i = 0; i < n; i++) {
A[i] = 0.0;
}
}
/* initialize a vector with random floating point numbers */
void init(double *A,int n)
{
int i;
for (i = 0; i < n; i++) {
A[i] = ((double )(drand48()));
}
}
/*serial version */
void axpy(double *x,double *y,long n,double a)
{
int i;
for (i = 0; i < n; i++) {
y[i] += a * x[i];
}
}
/* compare two arrays and return the relative difference (sum of absolute differences divided by the sum of |B|) */
double check(double *A,double *B,int n)
{
int i;
double diffsum = 0.0;
double sum = 0.0;
for (i = 0; i < n; i++) {
diffsum += fabs(A[i] - B[i]);
sum += fabs(B[i]);
}
return diffsum / sum;
}
void axpy_ompacc(double *x,double *y,int n,double a)
{
int i;
/* //implementation of the following omp target region
#pragma omp target teams distribute parallel for device (0) map(tofrom: y[0:n]) map(to: x[0:n],a,n) shared(x, y, n, a) private(i)
for (i = 0; i < n; ++i)
y[i] += a * x[i];
*/
double *_dev_x;
int _dev_x_size[1] = {n};
int _dev_x_offset[1] = {0};
int _dev_x_Dim[1] = {n};
double *_dev_y;
int _dev_y_size[1] = {n};
int _dev_y_offset[1] = {0};
int _dev_y_Dim[1] = {n};
{
/* Launch CUDA kernel ... */
int _threads_per_block_ = 1;
int _num_blocks_ = 1;
int64_t __device_id = 0;
void *__host_ptr = (void *)(&OUT__3__9500__axpy_ompacc__67__id__);
void *__args_base[] = {&n, &a, &i, x, y};
void *__args[] = {&n, &a, &i, x + 0, y + 0};
int64_t __arg_sizes[] = {((int64_t )(sizeof(int ))), ((int64_t )(sizeof(double ))), ((int64_t )(sizeof(int ))), ((int64_t )(sizeof(double ) * n)), ((int64_t )(sizeof(double ) * n))};
int64_t __arg_types[] = {33, 33, 33, 32, 35};
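// Presumed libomptarget map-type encoding (an assumption based on LLVM's
// tgt_map_type): 0x01 = copy to device, 0x02 = copy from device,
// 0x20 = explicit kernel parameter; e.g. 35 = to|from|param for y, which
// is the only array copied back to the host.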
int32_t __arg_num = 5;
__tgt_target_teams(OUT__3__9500__axpy_ompacc__67__id__,__host_ptr,__arg_num,__args_base,__args,__arg_sizes,__arg_types,_threads_per_block_,_num_blocks_);
}
}
int main(int argc,char *argv[])
{
int status = 0;
int n;
double *y_ompacc;
double *y;
double *x;
double a = 123.456;
n = 1 << 23;
// 2^23, 8 million
fprintf(stderr,"Usage: axpy <n>, where the problem size is 2^n.\n");
if (argc >= 2) {
n = 1 << atoi(argv[1]);
}
y_ompacc = ((double *)(malloc(n * sizeof(double ))));
y = ((double *)(malloc(n * sizeof(double ))));
x = ((double *)(malloc(n * sizeof(double ))));
srand48((1 << 12));
init(x,n);
init(y_ompacc,n);
memcpy(y,y_ompacc,n * sizeof(double ));
axpy(x,y,n,a);
int i;
double elapsed = read_timer_ms();
for (i = 0; i < 10; i++)
axpy_ompacc(x,y,n,a);
elapsed = (read_timer_ms() - elapsed) / 10;
double checkresult = check(y_ompacc,y,n);
fprintf(stderr,"axpy(%d): checksum: %g, time: %0.2fms\n",n,checkresult,elapsed);
//assert (checkresult < 1.0e-10);
printf("%g",elapsed);
free(y_ompacc);
free(y);
free(x);
return 0;
}
|
GB_unaryop__minv_fp64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_uint64
// op(A') function: GB_tran__minv_fp64_uint64
// C type: double
// A type: uint64_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_fp64_uint64
(
double *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_fp64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__iseq_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__iseq_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint64)
// A*D function (colscale): GB (_AxD__iseq_uint64)
// D*A function (rowscale): GB (_DxB__iseq_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint64)
// C=scalar+B GB (_bind1st__iseq_uint64)
// C=scalar+B' GB (_bind1st_tran__iseq_uint64)
// C=A+scalar GB (_bind2nd__iseq_uint64)
// C=A'+scalar GB (_bind2nd_tran__iseq_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij == bij)
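// Note: ISEQ returns its result in the operand type (uint64_t 0 or 1),
// unlike the boolean-valued GrB_EQ_UINT64 comparator.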
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT64 || GxB_NO_ISEQ_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__iseq_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__iseq_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__iseq_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__iseq_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__iseq_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__iseq_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cont_kriging.h | /*
Copyright 2009 HPGL Team
This file is part of HPGL (High Performance Geostatistics Library).
HPGL is free software: you can redistribute it and/or modify it under the terms of the BSD License.
You should have received a copy of the BSD License along with HPGL.
*/
#ifndef CONT_KRIGING_H_INCLUDED_LJLDFJVW450934VDV9ONV09NOASU92N34FOKLSDFGP3Q98SXNP
#define CONT_KRIGING_H_INCLUDED_LJLDFJVW450934VDV9ONV09NOASU92N34FOKLSDFGP3Q98SXNP
#include <combiner.h>
#include "covariance_field.h"
#include <progress_reporter.h>
#include <kriging_stats.h>
#include <omp.h>
#include "typedefs.h"
#include "select.h"
#include "precalculated_covariance.h"
#include "kriging_interpolation.h"
#include "neighbourhood_lookup.h"
#include "is_informed_predicate.h"
#include "cov_model.h"
namespace hpgl
{
/*!
* Enumeration specifying how kriging failures (absence of neighbours or a singular matrix) are handled
*/
enum kriging_failure_handling
{
mean_on_failure, //!< Put the mean value
undefined_on_failure //!< Leave node undefined
};
/*!
* Generic kriging algorithm for continuous data. Uses OpenMP.
*
*/
template<
typename grid_t, //!< Grid-With-Neighbour-Lookup concept
typename data_t, //!< Property concept
typename means_t, //!< Mean provider concept
typename covariances_t, //!< Covariance Model Concept
typename weight_calculator_t //!< Weight-Calculator concept.
>
void cont_kriging(
const data_t & input_property, //!< input data
const grid_t & grid,
const neighbourhood_param_t /*ok_params_t*/ & params, //!< parameters of neighbourhood search
const means_t & means, //!< mean values of data
const covariances_t & cov, //!< covariance model
const weight_calculator_t & wc, //!< Weight calculator; selects how the weights are computed (SK, OK or LVM kriging)
data_t & output_property, //!< resulting data
progress_reporter_t & report, //!< object for tracking progress
kriging_stats_t & stats, //!< returns some statistics of calculation
kriging_failure_handling fh = mean_on_failure //!< Way of handling kriging errors (absence of neighbours, singularity).
)
{
assert(input_property.size() == output_property.size());
assert(grid.size() == input_property.size());
double sum = 0;
stats.m_points_calculated = 0;
stats.m_points_without_neighbours = 0;
stats.m_mean = 0;
typedef indexed_neighbour_lookup_t<grid_t, covariances_t> nl_t;
nl_t neighbour_lookup(&grid, &cov, params);
for (node_index_t node = 0; node < input_property.size(); ++node)
{
if (input_property.is_informed(node))
{
neighbour_lookup.add_node(node);
}
}
report.start();
node_index_t idx_end = input_property.size();
unsigned long points_calculated = 0;
unsigned long points_without_neighbours = 0;
#pragma omp parallel
{
#pragma omp for reduction(+: points_calculated) reduction(+: points_without_neighbours) reduction(+: sum)
for(node_index_t idx = 0; idx < idx_end; ++idx)
{
if (!input_property.is_informed(idx))
{
cont_value_t value;
switch(kriging_interpolation(input_property, is_informed_predicate_t<data_t>(input_property), idx, cov, means, neighbour_lookup, wc, value))
{
case KI_SUCCESS:
output_property.set_at(idx, value);
points_calculated++;
sum += value;
break;
case KI_NO_NEIGHBOURS:
points_without_neighbours++;
if (fh == mean_on_failure)
output_property.set_at(idx, means[idx]);
// the mean contributes to the reported average even when the
// node is left undefined (undefined_on_failure)
sum += means[idx];
break;
case KI_SINGULARITY:
if (fh == mean_on_failure)
output_property.set_at(idx, means[idx]);
sum += means[idx];
break;
}
}
else
{
output_property.set_at(idx, input_property.get_at(idx));
}
#pragma omp critical
{
report.next_lap();
}
}
}
report.stop();
stats.m_points_calculated = points_calculated;
stats.m_points_without_neighbours = points_without_neighbours;
stats.m_mean = sum / output_property.size();
stats.m_speed_nps = report.iterations_per_second();
std::cout << "\nDone. Average speed: " << report.iterations_per_second() << " points/sec." << std::endl;
}
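/*!
 * A minimal usage sketch (illustrative only): the my_* types below are
 * hypothetical placeholders for models of the concepts documented above,
 * not actual HPGL classes, and the HPGL types shown are constructed with
 * elided arguments.
 *
 *   my_grid_t grid = ...;
 *   my_property_t input = ..., output = input;
 *   neighbourhood_param_t params = ...;
 *   my_means_t means;                 // e.g. a constant mean for SK
 *   my_cov_model_t cov = ...;
 *   my_weight_calculator_t wc;        // SK, OK or LVM variant
 *   progress_reporter_t reporter = ...;
 *   kriging_stats_t stats;
 *
 *   cont_kriging(input, grid, params, means, cov, wc,
 *                output, reporter, stats, undefined_on_failure);
 *
 * With undefined_on_failure, nodes whose kriging systems fail stay
 * uninformed in output; with mean_on_failure they receive means[idx].
 */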
}
#endif //CONT_KRIGING_H_INCLUDED_LJLDFJVW450934VDV9ONV09NOASU92N34FOKLSDFGP3Q98SXNP
|
estimate_gamma_m.c | /* Generated by Cython 0.29.15 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [],
"name": "estimate_gamma_m",
"sources": [
"/Users/huanh0b/Desktop/ModelDependence/submit-code/Combine-CMIP5/Combine-CMIP5/src/estimate_gamma_m.pyx"
]
},
"module_name": "estimate_gamma_m"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_15"
#define CYTHON_HEX_VERSION 0x001D0FF0
#define CYTHON_FUTURE_DIVISION 1
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#define PyObject_Unicode PyObject_Str
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
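/* fallback when NAN is undefined: an all-0xFF bit pattern sets the exponent
   to all ones with a nonzero mantissa, which IEEE-754 reads as a NaN
   (negative, and quiet on most platforms) */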
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__estimate_gamma_m
#define __PYX_HAVE_API__estimate_gamma_m
/* Early includes */
#include <math.h>
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
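/* checks 0 <= i < limit with a single comparison: casting a negative i to
   size_t wraps it to a value far above any valid limit */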
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"estimate_gamma_m.pyx",
"stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* "scipy/linalg/cython_lapack.pxd":15
* # The original libraries should be linked directly.
*
* ctypedef float s # <<<<<<<<<<<<<<
* ctypedef double d
* ctypedef float complex c
*/
typedef float __pyx_t_5scipy_6linalg_13cython_lapack_s;
/* "scipy/linalg/cython_lapack.pxd":16
*
* ctypedef float s
* ctypedef double d # <<<<<<<<<<<<<<
* ctypedef float complex c
* ctypedef double complex z
*/
typedef double __pyx_t_5scipy_6linalg_13cython_lapack_d;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "scipy/linalg/cython_lapack.pxd":22
* # Function pointer type declarations for
* # gees and gges families of functions.
* ctypedef bint cselect1(c*) # <<<<<<<<<<<<<<
* ctypedef bint cselect2(c*, c*)
* ctypedef bint dselect2(d*, d*)
*/
typedef int __pyx_t_5scipy_6linalg_13cython_lapack_cselect1(__pyx_t_float_complex *);
/* "scipy/linalg/cython_lapack.pxd":23
* # gees and gges families of functions.
* ctypedef bint cselect1(c*)
* ctypedef bint cselect2(c*, c*) # <<<<<<<<<<<<<<
* ctypedef bint dselect2(d*, d*)
* ctypedef bint dselect3(d*, d*, d*)
*/
typedef int __pyx_t_5scipy_6linalg_13cython_lapack_cselect2(__pyx_t_float_complex *, __pyx_t_float_complex *);
/* "scipy/linalg/cython_lapack.pxd":24
* ctypedef bint cselect1(c*)
* ctypedef bint cselect2(c*, c*)
* ctypedef bint dselect2(d*, d*) # <<<<<<<<<<<<<<
* ctypedef bint dselect3(d*, d*, d*)
* ctypedef bint sselect2(s*, s*)
*/
typedef int __pyx_t_5scipy_6linalg_13cython_lapack_dselect2(__pyx_t_5scipy_6linalg_13cython_lapack_d *, __pyx_t_5scipy_6linalg_13cython_lapack_d *);
/* "scipy/linalg/cython_lapack.pxd":25
* ctypedef bint cselect2(c*, c*)
* ctypedef bint dselect2(d*, d*)
* ctypedef bint dselect3(d*, d*, d*) # <<<<<<<<<<<<<<
* ctypedef bint sselect2(s*, s*)
* ctypedef bint sselect3(s*, s*, s*)
*/
typedef int __pyx_t_5scipy_6linalg_13cython_lapack_dselect3(__pyx_t_5scipy_6linalg_13cython_lapack_d *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, __pyx_t_5scipy_6linalg_13cython_lapack_d *);
/* "scipy/linalg/cython_lapack.pxd":26
* ctypedef bint dselect2(d*, d*)
* ctypedef bint dselect3(d*, d*, d*)
* ctypedef bint sselect2(s*, s*) # <<<<<<<<<<<<<<
* ctypedef bint sselect3(s*, s*, s*)
* ctypedef bint zselect1(z*)
*/
typedef int __pyx_t_5scipy_6linalg_13cython_lapack_sselect2(__pyx_t_5scipy_6linalg_13cython_lapack_s *, __pyx_t_5scipy_6linalg_13cython_lapack_s *);
/* "scipy/linalg/cython_lapack.pxd":27
* ctypedef bint dselect3(d*, d*, d*)
* ctypedef bint sselect2(s*, s*)
* ctypedef bint sselect3(s*, s*, s*) # <<<<<<<<<<<<<<
* ctypedef bint zselect1(z*)
* ctypedef bint zselect2(z*, z*)
*/
typedef int __pyx_t_5scipy_6linalg_13cython_lapack_sselect3(__pyx_t_5scipy_6linalg_13cython_lapack_s *, __pyx_t_5scipy_6linalg_13cython_lapack_s *, __pyx_t_5scipy_6linalg_13cython_lapack_s *);
/* "scipy/linalg/cython_lapack.pxd":28
* ctypedef bint sselect2(s*, s*)
* ctypedef bint sselect3(s*, s*, s*)
* ctypedef bint zselect1(z*) # <<<<<<<<<<<<<<
* ctypedef bint zselect2(z*, z*)
*
*/
typedef int __pyx_t_5scipy_6linalg_13cython_lapack_zselect1(__pyx_t_double_complex *);
/* "scipy/linalg/cython_lapack.pxd":29
* ctypedef bint sselect3(s*, s*, s*)
* ctypedef bint zselect1(z*)
* ctypedef bint zselect2(z*, z*) # <<<<<<<<<<<<<<
*
* cdef void cbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *rwork, int *lrwork, int *info) nogil
*/
typedef int __pyx_t_5scipy_6linalg_13cython_lapack_zselect2(__pyx_t_double_complex *, __pyx_t_double_complex *);
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
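/* Usage sketch (illustrative only, not emitted by Cython): the three helpers
   above fix the byteorder argument of PyUnicode_DecodeUTF16 (0 = native
   order with BOM detection, -1 = little-endian, 1 = big-endian). A
   hypothetical caller decoding a known little-endian buffer would look
   like this: */
static CYTHON_UNUSED PyObject *__pyx_example_decode_utf16le(void) {
    static const char buf[] = "h\0i\0";  /* "hi" encoded as UTF-16LE */
    /* Returns a new unicode reference, or NULL with an exception set;
       NULL errors means strict error handling. */
    return __Pyx_PyUnicode_DecodeUTF16LE(buf, 4, NULL);
}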
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
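/* Fast path: when the list still has spare capacity (allocated > len), the
   new item is stored directly via PyList_SET_ITEM and ob_size is bumped,
   skipping the PyList_Append call entirely; on builds without access to
   list internals it degrades to plain PyList_Append. */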
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
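/* Specialized integer add: on CPython a dedicated C implementation handles
   the common case of adding a C long constant to a Python int; PyPy simply
   dispatches to PyNumber_Add / PyNumber_InPlaceAdd. */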
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
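/* On CPython this borrows the private _PyList_Extend (which returns None on
   success); the portable fallback appends v by assigning it to the empty
   slice at the very end of the list (PY_SSIZE_T_MAX..PY_SSIZE_T_MAX). */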
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
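/* Same in-place trick as __Pyx_ListComp_Append, but additionally gated on
   len > allocated/2, presumably so the shortcut is only taken while the
   list is at least half full and CPython's own resize heuristics stay in
   charge of sparsely filled lists. */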
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
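/* The code-object cache maps C source lines back to Python code objects so
   that __Pyx_AddTraceback can synthesize accurate tracebacks; entries are
   kept sorted by code_line and located with the binary search declared
   below (__pyx_bisect_code_objects). */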
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
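/* Per-buffer bookkeeping for buffer-typed locals: one __Pyx_Buf_DimInfo per
   dimension (up to 8, consistent with __Pyx_BUF_MAX_NDIMS above) caching
   shape/strides/suboffsets for fast indexing. */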
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* MemviewDtypeToObject.proto */
static CYTHON_INLINE PyObject *__pyx_memview_get_int(const char *itemp);
static CYTHON_INLINE int __pyx_memview_set_int(const char *itemp, PyObject *obj);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
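/* Real/imaginary access resolves to C++ std::complex methods, C99
   __real__/__imag__, or plain struct fields, depending on how complex
   support was configured; the SET_ variants paper over the fact that C++
   exposes setter methods while the other two forms are assignable lvalues. */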
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
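/* With native complex support (CYTHON_CCOMPLEX) the arithmetic macros in
   the two blocks above map straight onto C/C++ operators and library
   helpers (conj/cabs/cpow and their float variants); without it, Cython
   falls back to the soft implementations declared here, which operate on
   the __pyx_t_float_complex/__pyx_t_double_complex structs field by field. */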
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *, int writable_flag);
/* CStringEquals.proto */
static CYTHON_INLINE int __Pyx_StrEq(const char *, const char *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* FunctionImport.proto */
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'libc.math' */
/* Module declarations from 'scipy.linalg.cython_lapack' */
static void (*__pyx_f_5scipy_6linalg_13cython_lapack_dpotrf)(char *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *); /*proto*/
static void (*__pyx_f_5scipy_6linalg_13cython_lapack_dpotri)(char *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *); /*proto*/
static void (*__pyx_f_5scipy_6linalg_13cython_lapack_dpotrs)(char *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *); /*proto*/
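/* dpotrf/dpotri/dpotrs are the LAPACK Cholesky routines (factorize a
   symmetric positive-definite matrix, invert it from its factor, and solve
   with the factor, respectively); the pointers are filled in at module init
   via function import from scipy.linalg.cython_lapack. Illustrative sketch
   (hypothetical helper, not part of the generated module) of how the first
   two chain into an in-place SPD inverse, matching the signatures declared
   above: */
static CYTHON_UNUSED void __pyx_example_spd_inverse(double *a, int n, int *info) {
    char uplo = 'U';  /* LAPACK expects column-major storage */
    /* Factor A = U^T U in place. */
    __pyx_f_5scipy_6linalg_13cython_lapack_dpotrf(&uplo, &n, a, &n, info);
    if (*info != 0) return;  /* not positive definite, or bad argument */
    /* Overwrite the factor with the inverse of A. */
    __pyx_f_5scipy_6linalg_13cython_lapack_dpotri(&uplo, &n, a, &n, info);
}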
/* Module declarations from 'estimate_gamma_m' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
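/* Up to 8 pre-created thread locks are pooled here and handed out to
   memoryviews on platforms without atomic increments, so slice acquisition
   counting stays thread-safe without allocating a lock per view. */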
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 };
#define __Pyx_MODULE_NAME "estimate_gamma_m"
extern int __pyx_module_is_main_estimate_gamma_m;
int __pyx_module_is_main_estimate_gamma_m = 0;
/* Implementation of 'estimate_gamma_m' */
static PyObject *__pyx_builtin_max;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_M[] = "M";
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_m[] = "m";
static const char __pyx_k_n[] = "n";
static const char __pyx_k_Rm[] = "Rm";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_RFm[] = "RFm";
static const char __pyx_k_RHm[] = "RHm";
static const char __pyx_k__20[] = "*";
static const char __pyx_k_cov[] = "cov";
static const char __pyx_k_max[] = "max";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_nsq[] = "nsq";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_spl[] = "spl";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dice[] = "dice";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_dist[] = "dist";
static const char __pyx_k_info[] = "info";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_phiFm[] = "phiFm";
static const char __pyx_k_phiHm[] = "phiHm";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_scipy[] = "scipy";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_state[] = "state";
static const char __pyx_k_tools[] = "tools";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_linalg[] = "linalg";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_offset[] = "offset";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_random[] = "random";
static const char __pyx_k_rateFm[] = "rateFm";
static const char __pyx_k_rateHm[] = "rateHm";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_flatten[] = "flatten";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_gammaFm[] = "gammaFm";
static const char __pyx_k_gammaHm[] = "gammaHm";
static const char __pyx_k_logProb[] = "logProb";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_uniform[] = "uniform";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_XFmrDiff[] = "XFmrDiff";
static const char __pyx_k_XHmrDiff[] = "XHmrDiff";
static const char __pyx_k_bGammaFm[] = "bGammaFm";
static const char __pyx_k_bGammaHm[] = "bGammaHm";
static const char __pyx_k_covMatFm[] = "covMatFm";
static const char __pyx_k_covMatHm[] = "covMatHm";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_logProbOld[] = "logProbOld";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_gamma_m_New[] = "gamma_m_New";
static const char __pyx_k_invCovMatFm[] = "invCovMatFm";
static const char __pyx_k_invCovMatHm[] = "invCovMatHm";
static const char __pyx_k_covMat_m_New[] = "covMat_m_New";
static const char __pyx_k_logProbPart1[] = "logProbPart1";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_acceptGammaFm[] = "acceptGammaFm";
static const char __pyx_k_acceptGammaHm[] = "acceptGammaHm";
static const char __pyx_k_normalvariate[] = "normalvariate";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_gamma_m_New_log[] = "gamma_m_New_log";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_estimate_gamma_m[] = "estimate_gamma_m";
static const char __pyx_k_XFmrDiff_original[] = "XFmrDiff_original";
static const char __pyx_k_XHmrDiff_original[] = "XHmrDiff_original";
static const char __pyx_k_covMat_m_New_save[] = "covMat_m_New_save";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_logGammaFmProbPart1[] = "logGammaFmProbPart1";
static const char __pyx_k_logGammaHmProbPart1[] = "logGammaHmProbPart1";
static const char __pyx_k_estimate_gamma_m_pyx[] = "estimate_gamma_m.pyx";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_M;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_RFm;
static PyObject *__pyx_n_s_RHm;
static PyObject *__pyx_n_s_Rm;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s_XFmrDiff;
static PyObject *__pyx_n_s_XFmrDiff_original;
static PyObject *__pyx_n_s_XHmrDiff;
static PyObject *__pyx_n_s_XHmrDiff_original;
static PyObject *__pyx_n_s__20;
static PyObject *__pyx_n_s_acceptGammaFm;
static PyObject *__pyx_n_s_acceptGammaHm;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_bGammaFm;
static PyObject *__pyx_n_s_bGammaHm;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_cov;
static PyObject *__pyx_n_s_covMatFm;
static PyObject *__pyx_n_s_covMatHm;
static PyObject *__pyx_n_s_covMat_m_New;
static PyObject *__pyx_n_s_covMat_m_New_save;
static PyObject *__pyx_n_s_dice;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dist;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_estimate_gamma_m;
static PyObject *__pyx_kp_s_estimate_gamma_m_pyx;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_flatten;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_gammaFm;
static PyObject *__pyx_n_s_gammaHm;
static PyObject *__pyx_n_s_gamma_m_New;
static PyObject *__pyx_n_s_gamma_m_New_log;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_info;
static PyObject *__pyx_n_s_invCovMatFm;
static PyObject *__pyx_n_s_invCovMatHm;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_linalg;
static PyObject *__pyx_n_s_logGammaFmProbPart1;
static PyObject *__pyx_n_s_logGammaHmProbPart1;
static PyObject *__pyx_n_s_logProb;
static PyObject *__pyx_n_s_logProbOld;
static PyObject *__pyx_n_s_logProbPart1;
static PyObject *__pyx_n_s_m;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_max;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_n;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_normalvariate;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_nsq;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_offset;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_phiFm;
static PyObject *__pyx_n_s_phiHm;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_random;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_rateFm;
static PyObject *__pyx_n_s_rateHm;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_scipy;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_spl;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_state;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_tools;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_uniform;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_16estimate_gamma_m_estimate_gamma_m(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_bGammaHm, PyObject *__pyx_v_bGammaFm); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_float_0_05;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__16;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__28;
static PyObject *__pyx_codeobj__22;
static PyObject *__pyx_codeobj__29;
/* Late includes */
/* "estimate_gamma_m.pyx":19
*
*
* def estimate_gamma_m(bGammaHm=0.05,bGammaFm=0.05): # <<<<<<<<<<<<<<
*
* cdef double[:,:] dist = state.dist
*/
/* Python wrapper */
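/* The wrapper below unpacks up to two positional arguments and the
   keywords bGammaHm/bGammaFm, both defaulting to the interned float 0.05
   (__pyx_float_0_05), before dispatching to the implementation function. */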
static PyObject *__pyx_pw_16estimate_gamma_m_1estimate_gamma_m(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_16estimate_gamma_m_1estimate_gamma_m = {"estimate_gamma_m", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_16estimate_gamma_m_1estimate_gamma_m, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_16estimate_gamma_m_1estimate_gamma_m(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_bGammaHm = 0;
PyObject *__pyx_v_bGammaFm = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("estimate_gamma_m (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bGammaHm,&__pyx_n_s_bGammaFm,0};
PyObject* values[2] = {0,0};
values[0] = ((PyObject *)__pyx_float_0_05);
values[1] = ((PyObject *)__pyx_float_0_05);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bGammaHm);
if (value) { values[0] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 1:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bGammaFm);
if (value) { values[1] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "estimate_gamma_m") < 0)) __PYX_ERR(0, 19, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_bGammaHm = values[0];
__pyx_v_bGammaFm = values[1];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("estimate_gamma_m", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 19, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("estimate_gamma_m.estimate_gamma_m", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_16estimate_gamma_m_estimate_gamma_m(__pyx_self, __pyx_v_bGammaHm, __pyx_v_bGammaFm);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
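/* Implementation function: every memoryview local is zero-initialized
   ({ 0, 0, { 0 }, { 0 }, { 0 } }) so that cleanup on the error path can
   safely release slices that were never acquired; the long run of
   __pyx_t_* temporaries that follows holds intermediate indices and values
   for the generated buffer accesses. */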
static PyObject *__pyx_pf_16estimate_gamma_m_estimate_gamma_m(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_bGammaHm, PyObject *__pyx_v_bGammaFm) {
__Pyx_memviewslice __pyx_v_dist = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_Rm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_XHmrDiff = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_XHmrDiff_original = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_XFmrDiff = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_XFmrDiff_original = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_logGammaHmProbPart1 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_covMatHm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_invCovMatHm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_acceptGammaHm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_rateHm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_phiHm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_gammaHm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_logGammaFmProbPart1 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_covMatFm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_invCovMatFm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_acceptGammaFm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_rateFm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_phiFm = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_gammaFm = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_M;
int __pyx_v_n;
int __pyx_v_m;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_info;
int __pyx_v_nsq;
int __pyx_v_offset;
__Pyx_memviewslice __pyx_v_covMat_m_New = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_covMat_m_New_save = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_gamma_m_New = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_logProbPart1 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_logProb = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_logProbOld = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_dice = { 0, 0, { 0 }, { 0 }, { 0 } };
double __pyx_v_cov;
PyObject *__pyx_v_gamma_m_New_log = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_memviewslice __pyx_t_3 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } };
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
int __pyx_t_10;
int __pyx_t_11;
double __pyx_t_12;
int __pyx_t_13;
PyObject *__pyx_t_14 = NULL;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
Py_ssize_t __pyx_t_24;
Py_ssize_t __pyx_t_25;
Py_ssize_t __pyx_t_26;
double __pyx_t_27;
Py_ssize_t __pyx_t_28;
Py_ssize_t __pyx_t_29;
Py_ssize_t __pyx_t_30;
Py_ssize_t __pyx_t_31;
Py_ssize_t __pyx_t_32;
Py_ssize_t __pyx_t_33;
Py_ssize_t __pyx_t_34;
Py_ssize_t __pyx_t_35;
Py_ssize_t __pyx_t_36;
Py_ssize_t __pyx_t_37;
Py_ssize_t __pyx_t_38;
Py_ssize_t __pyx_t_39;
Py_ssize_t __pyx_t_40;
Py_ssize_t __pyx_t_41;
Py_ssize_t __pyx_t_42;
Py_ssize_t __pyx_t_43;
Py_ssize_t __pyx_t_44;
Py_ssize_t __pyx_t_45;
Py_ssize_t __pyx_t_46;
Py_ssize_t __pyx_t_47;
Py_ssize_t __pyx_t_48;
Py_ssize_t __pyx_t_49;
Py_ssize_t __pyx_t_50;
Py_ssize_t __pyx_t_51;
Py_ssize_t __pyx_t_52;
Py_ssize_t __pyx_t_53;
Py_ssize_t __pyx_t_54;
Py_ssize_t __pyx_t_55;
Py_ssize_t __pyx_t_56;
Py_ssize_t __pyx_t_57;
Py_ssize_t __pyx_t_58;
Py_ssize_t __pyx_t_59;
Py_ssize_t __pyx_t_60;
Py_ssize_t __pyx_t_61;
Py_ssize_t __pyx_t_62;
Py_ssize_t __pyx_t_63;
Py_ssize_t __pyx_t_64;
Py_ssize_t __pyx_t_65;
Py_ssize_t __pyx_t_66;
Py_ssize_t __pyx_t_67;
Py_ssize_t __pyx_t_68;
Py_ssize_t __pyx_t_69;
Py_ssize_t __pyx_t_70;
Py_ssize_t __pyx_t_71;
Py_ssize_t __pyx_t_72;
Py_ssize_t __pyx_t_73;
Py_ssize_t __pyx_t_74;
Py_ssize_t __pyx_t_75;
Py_ssize_t __pyx_t_76;
Py_ssize_t __pyx_t_77;
Py_ssize_t __pyx_t_78;
Py_ssize_t __pyx_t_79;
Py_ssize_t __pyx_t_80;
Py_ssize_t __pyx_t_81;
Py_ssize_t __pyx_t_82;
Py_ssize_t __pyx_t_83;
Py_ssize_t __pyx_t_84;
Py_ssize_t __pyx_t_85;
Py_ssize_t __pyx_t_86;
Py_ssize_t __pyx_t_87;
Py_ssize_t __pyx_t_88;
Py_ssize_t __pyx_t_89;
Py_ssize_t __pyx_t_90;
Py_ssize_t __pyx_t_91;
Py_ssize_t __pyx_t_92;
Py_ssize_t __pyx_t_93;
Py_ssize_t __pyx_t_94;
Py_ssize_t __pyx_t_95;
Py_ssize_t __pyx_t_96;
Py_ssize_t __pyx_t_97;
Py_ssize_t __pyx_t_98;
Py_ssize_t __pyx_t_99;
Py_ssize_t __pyx_t_100;
Py_ssize_t __pyx_t_101;
Py_ssize_t __pyx_t_102;
Py_ssize_t __pyx_t_103;
Py_ssize_t __pyx_t_104;
Py_ssize_t __pyx_t_105;
Py_ssize_t __pyx_t_106;
Py_ssize_t __pyx_t_107;
Py_ssize_t __pyx_t_108;
Py_ssize_t __pyx_t_109;
Py_ssize_t __pyx_t_110;
Py_ssize_t __pyx_t_111;
Py_ssize_t __pyx_t_112;
Py_ssize_t __pyx_t_113;
Py_ssize_t __pyx_t_114;
Py_ssize_t __pyx_t_115;
Py_ssize_t __pyx_t_116;
Py_ssize_t __pyx_t_117;
Py_ssize_t __pyx_t_118;
Py_ssize_t __pyx_t_119;
Py_ssize_t __pyx_t_120;
Py_ssize_t __pyx_t_121;
Py_ssize_t __pyx_t_122;
Py_ssize_t __pyx_t_123;
Py_ssize_t __pyx_t_124;
Py_ssize_t __pyx_t_125;
Py_ssize_t __pyx_t_126;
Py_ssize_t __pyx_t_127;
__Pyx_RefNannySetupContext("estimate_gamma_m", 0);
/* "estimate_gamma_m.pyx":21
* def estimate_gamma_m(bGammaHm=0.05,bGammaFm=0.05):
*
* cdef double[:,:] dist = state.dist # <<<<<<<<<<<<<<
* cdef int[:] Rm = state.RHm;
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_dist); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_3 = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_3.memview)) __PYX_ERR(0, 21, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_dist = __pyx_t_3;
__pyx_t_3.memview = NULL;
__pyx_t_3.data = NULL;
/* "estimate_gamma_m.pyx":22
*
* cdef double[:,:] dist = state.dist
* cdef int[:] Rm = state.RHm; # <<<<<<<<<<<<<<
*
* cdef double[:] XHmrDiff = state.XHmrDiff.flatten()
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_RHm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_Rm = __pyx_t_4;
__pyx_t_4.memview = NULL;
__pyx_t_4.data = NULL;
/* "estimate_gamma_m.pyx":24
* cdef int[:] Rm = state.RHm;
*
* cdef double[:] XHmrDiff = state.XHmrDiff.flatten() # <<<<<<<<<<<<<<
* cdef double[:] XHmrDiff_original = state.XHmrDiff.flatten()
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_XHmrDiff); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_flatten); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_XHmrDiff = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
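/* The same pattern repeats for each state array below: fetch the module
   global `state`, read the attribute, call .flatten() (unpacking the bound
   method when CYTHON_UNPACK_METHODS allows, so the underlying function is
   called with self as an explicit argument), then coerce the resulting
   array to a writable 1-D double memoryview. */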
/* "estimate_gamma_m.pyx":25
*
* cdef double[:] XHmrDiff = state.XHmrDiff.flatten()
* cdef double[:] XHmrDiff_original = state.XHmrDiff.flatten() # <<<<<<<<<<<<<<
*
* cdef double[:] XFmrDiff = state.XFmrDiff.flatten()
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_XHmrDiff); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_flatten); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 25, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_XHmrDiff_original = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":27
* cdef double[:] XHmrDiff_original = state.XHmrDiff.flatten()
*
* cdef double[:] XFmrDiff = state.XFmrDiff.flatten() # <<<<<<<<<<<<<<
* cdef double[:] XFmrDiff_original = state.XFmrDiff.flatten()
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_XFmrDiff); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_flatten); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 27, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_XFmrDiff = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":28
*
* cdef double[:] XFmrDiff = state.XFmrDiff.flatten()
* cdef double[:] XFmrDiff_original = state.XFmrDiff.flatten() # <<<<<<<<<<<<<<
*
* cdef double[:] logGammaHmProbPart1 = state.logGammaHmProbPart1
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_XFmrDiff); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_flatten); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 28, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_XFmrDiff_original = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":30
* cdef double[:] XFmrDiff_original = state.XFmrDiff.flatten()
*
* cdef double[:] logGammaHmProbPart1 = state.logGammaHmProbPart1 # <<<<<<<<<<<<<<
* cdef double[:,:,:] covMatHm = state.covMatHm
* cdef double[:,:,:] invCovMatHm = state.invCovMatHm
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_logGammaHmProbPart1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_logGammaHmProbPart1 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":31
*
* cdef double[:] logGammaHmProbPart1 = state.logGammaHmProbPart1
* cdef double[:,:,:] covMatHm = state.covMatHm # <<<<<<<<<<<<<<
* cdef double[:,:,:] invCovMatHm = state.invCovMatHm
* cdef double[:] acceptGammaHm = state.acceptGammaHm
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_covMatHm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_covMatHm = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "estimate_gamma_m.pyx":32
* cdef double[:] logGammaHmProbPart1 = state.logGammaHmProbPart1
* cdef double[:,:,:] covMatHm = state.covMatHm
* cdef double[:,:,:] invCovMatHm = state.invCovMatHm # <<<<<<<<<<<<<<
* cdef double[:] acceptGammaHm = state.acceptGammaHm
* cdef double[:] rateHm = state.rateHm
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_invCovMatHm); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_invCovMatHm = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "estimate_gamma_m.pyx":33
* cdef double[:,:,:] covMatHm = state.covMatHm
* cdef double[:,:,:] invCovMatHm = state.invCovMatHm
* cdef double[:] acceptGammaHm = state.acceptGammaHm # <<<<<<<<<<<<<<
* cdef double[:] rateHm = state.rateHm
* cdef double[:] phiHm = state.phiHm
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_acceptGammaHm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_acceptGammaHm = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":34
* cdef double[:,:,:] invCovMatHm = state.invCovMatHm
* cdef double[:] acceptGammaHm = state.acceptGammaHm
* cdef double[:] rateHm = state.rateHm # <<<<<<<<<<<<<<
* cdef double[:] phiHm = state.phiHm
* cdef double[:] gammaHm = state.gammaHm;
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 34, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_rateHm); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 34, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_rateHm = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":35
* cdef double[:] acceptGammaHm = state.acceptGammaHm
* cdef double[:] rateHm = state.rateHm
* cdef double[:] phiHm = state.phiHm # <<<<<<<<<<<<<<
* cdef double[:] gammaHm = state.gammaHm;
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_phiHm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_phiHm = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":36
* cdef double[:] rateHm = state.rateHm
* cdef double[:] phiHm = state.phiHm
* cdef double[:] gammaHm = state.gammaHm; # <<<<<<<<<<<<<<
*
* cdef double[:] logGammaFmProbPart1 = state.logGammaFmProbPart1
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_gammaHm); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_gammaHm = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":38
* cdef double[:] gammaHm = state.gammaHm;
*
* cdef double[:] logGammaFmProbPart1 = state.logGammaFmProbPart1 # <<<<<<<<<<<<<<
* cdef double[:,:,:] covMatFm = state.covMatFm
* cdef double[:,:,:] invCovMatFm = state.invCovMatFm
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_logGammaFmProbPart1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_logGammaFmProbPart1 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":39
*
* cdef double[:] logGammaFmProbPart1 = state.logGammaFmProbPart1
* cdef double[:,:,:] covMatFm = state.covMatFm # <<<<<<<<<<<<<<
* cdef double[:,:,:] invCovMatFm = state.invCovMatFm
* cdef double[:] acceptGammaFm = state.acceptGammaFm
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_covMatFm); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_covMatFm = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "estimate_gamma_m.pyx":40
* cdef double[:] logGammaFmProbPart1 = state.logGammaFmProbPart1
* cdef double[:,:,:] covMatFm = state.covMatFm
* cdef double[:,:,:] invCovMatFm = state.invCovMatFm # <<<<<<<<<<<<<<
* cdef double[:] acceptGammaFm = state.acceptGammaFm
* cdef double[:] rateFm = state.rateFm
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_invCovMatFm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_invCovMatFm = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "estimate_gamma_m.pyx":41
* cdef double[:,:,:] covMatFm = state.covMatFm
* cdef double[:,:,:] invCovMatFm = state.invCovMatFm
* cdef double[:] acceptGammaFm = state.acceptGammaFm # <<<<<<<<<<<<<<
* cdef double[:] rateFm = state.rateFm
* cdef double[:] phiFm = state.phiFm
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 41, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_acceptGammaFm); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 41, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 41, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_acceptGammaFm = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":42
* cdef double[:,:,:] invCovMatFm = state.invCovMatFm
* cdef double[:] acceptGammaFm = state.acceptGammaFm
* cdef double[:] rateFm = state.rateFm # <<<<<<<<<<<<<<
* cdef double[:] phiFm = state.phiFm
* cdef double[:] gammaFm = state.gammaFm;
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_rateFm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 42, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_rateFm = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":43
* cdef double[:] acceptGammaFm = state.acceptGammaFm
* cdef double[:] rateFm = state.rateFm
* cdef double[:] phiFm = state.phiFm # <<<<<<<<<<<<<<
* cdef double[:] gammaFm = state.gammaFm;
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_phiFm); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 43, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_phiFm = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":44
* cdef double[:] rateFm = state.rateFm
* cdef double[:] phiFm = state.phiFm
* cdef double[:] gammaFm = state.gammaFm; # <<<<<<<<<<<<<<
*
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_gammaFm); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_gammaFm = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
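/* Note: everything generated from "estimate_gamma_m.pyx" lines 22-44
   above is straight-line binding of `state` fields into typed
   memoryviews; the Fm blocks mirror the Hm blocks field-for-field. */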
/* "estimate_gamma_m.pyx":47
*
*
* cdef int M = state.M, n = state.n, m,i,j, info; # <<<<<<<<<<<<<<
* cdef int nsq = n*n;
* cdef int offset = max(Rm) * n;
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_M); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_M = __pyx_t_8;
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_n); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_n = __pyx_t_8;
/* "estimate_gamma_m.pyx":48
*
* cdef int M = state.M, n = state.n, m,i,j, info;
* cdef int nsq = n*n; # <<<<<<<<<<<<<<
* cdef int offset = max(Rm) * n;
*
*/
__pyx_v_nsq = (__pyx_v_n * __pyx_v_n);
/* "estimate_gamma_m.pyx":49
* cdef int M = state.M, n = state.n, m,i,j, info;
* cdef int nsq = n*n;
* cdef int offset = max(Rm) * n; # <<<<<<<<<<<<<<
*
* cdef double[:] covMat_m_New = np.zeros(M*nsq);
*/
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_Rm, 1, (PyObject *(*)(char *)) __pyx_memview_get_int, (int (*)(char *, PyObject *)) __pyx_memview_set_int, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 49, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_max, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 49, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_n); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 49, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = PyNumber_Multiply(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 49, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_8 = __Pyx_PyInt_As_int(__pyx_t_5); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_offset = __pyx_t_8;
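/* Note: `max(Rm) * n` round-trips through Python objects: the int
   memoryview is re-wrapped as a memoryview object, handed to the
   builtin max, multiplied via PyNumber_Multiply, and the result is
   coerced back to a C int. */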
/* "estimate_gamma_m.pyx":51
* cdef int offset = max(Rm) * n;
*
* cdef double[:] covMat_m_New = np.zeros(M*nsq); # <<<<<<<<<<<<<<
* cdef double[:] covMat_m_New_save = np.zeros(M*nsq);
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 51, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int((__pyx_v_M * __pyx_v_nsq)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_9, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 51, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_5, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 51, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_covMat_m_New = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":52
*
* cdef double[:] covMat_m_New = np.zeros(M*nsq);
* cdef double[:] covMat_m_New_save = np.zeros(M*nsq); # <<<<<<<<<<<<<<
*
* cdef double[:] gamma_m_New = np.zeros(M);
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 52, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 52, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_int((__pyx_v_M * __pyx_v_nsq)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 52, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_9, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 52, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_5, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 52, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_covMat_m_New_save = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":54
* cdef double[:] covMat_m_New_save = np.zeros(M*nsq);
*
* cdef double[:] gamma_m_New = np.zeros(M); # <<<<<<<<<<<<<<
* cdef double[:] logProbPart1 = np.zeros(M), logProb = np.zeros(M), logProbOld = np.zeros(M);
* cdef double[:] dice = np.zeros(M);
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_9, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_5, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_gamma_m_New = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":55
*
* cdef double[:] gamma_m_New = np.zeros(M);
* cdef double[:] logProbPart1 = np.zeros(M), logProb = np.zeros(M), logProbOld = np.zeros(M); # <<<<<<<<<<<<<<
* cdef double[:] dice = np.zeros(M);
* cdef double cov
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_9, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_5, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_logProbPart1 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_9, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_5, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_logProb = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_9, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_5, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 55, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_logProbOld = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":56
* cdef double[:] gamma_m_New = np.zeros(M);
* cdef double[:] logProbPart1 = np.zeros(M), logProb = np.zeros(M), logProbOld = np.zeros(M);
* cdef double[:] dice = np.zeros(M); # <<<<<<<<<<<<<<
* cdef double cov
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_9 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_9, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_1);
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_5, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 56, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dice = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
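/* Note: the scratch buffers above come from np.zeros and are coerced
   to writable double memoryviews; covMat_m_New packs M consecutive
   n*n blocks, one per index m. */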
/* "estimate_gamma_m.pyx":60
*
* # Update gammaHm
* for m in range(M): # <<<<<<<<<<<<<<
* gamma_m_New_log=random.normalvariate(c_log(state.gammaHm[m]),bGammaHm)
* gamma_m_New[m]=c_exp(gamma_m_New_log)
*/
__pyx_t_8 = __pyx_v_M;
__pyx_t_10 = __pyx_t_8;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_m = __pyx_t_11;
/* "estimate_gamma_m.pyx":61
* # Update gammaHm
* for m in range(M):
* gamma_m_New_log=random.normalvariate(c_log(state.gammaHm[m]),bGammaHm) # <<<<<<<<<<<<<<
* gamma_m_New[m]=c_exp(gamma_m_New_log)
* dice[m] = c_log(random.uniform(0,1))
*/
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_normalvariate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_state); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_gammaHm); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_GetItemInt(__pyx_t_9, __pyx_v_m, int, 1, __Pyx_PyInt_From_int, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_12 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_12 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyFloat_FromDouble(log(__pyx_t_12)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_9 = NULL;
__pyx_t_13 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) {
__pyx_t_9 = PyMethod_GET_SELF(__pyx_t_1);
if (likely(__pyx_t_9)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1);
__Pyx_INCREF(__pyx_t_9);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_1, function);
__pyx_t_13 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_t_2, __pyx_v_bGammaHm};
__pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_13, 2+__pyx_t_13); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) {
PyObject *__pyx_temp[3] = {__pyx_t_9, __pyx_t_2, __pyx_v_bGammaHm};
__pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-__pyx_t_13, 2+__pyx_t_13); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else
#endif
{
__pyx_t_14 = PyTuple_New(2+__pyx_t_13); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
if (__pyx_t_9) {
__Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_9); __pyx_t_9 = NULL;
}
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_14, 0+__pyx_t_13, __pyx_t_2);
__Pyx_INCREF(__pyx_v_bGammaHm);
__Pyx_GIVEREF(__pyx_v_bGammaHm);
PyTuple_SET_ITEM(__pyx_t_14, 1+__pyx_t_13, __pyx_v_bGammaHm);
__pyx_t_2 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_14, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF_SET(__pyx_v_gamma_m_New_log, __pyx_t_5);
__pyx_t_5 = 0;
/* "estimate_gamma_m.pyx":62
* for m in range(M):
* gamma_m_New_log=random.normalvariate(c_log(state.gammaHm[m]),bGammaHm)
* gamma_m_New[m]=c_exp(gamma_m_New_log) # <<<<<<<<<<<<<<
* dice[m] = c_log(random.uniform(0,1))
*
*/
__pyx_t_12 = __pyx_PyFloat_AsDouble(__pyx_v_gamma_m_New_log); if (unlikely((__pyx_t_12 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 62, __pyx_L1_error)
__pyx_t_15 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_gamma_m_New.data + __pyx_t_15 * __pyx_v_gamma_m_New.strides[0]) )) = exp(__pyx_t_12);
/* "estimate_gamma_m.pyx":63
* gamma_m_New_log=random.normalvariate(c_log(state.gammaHm[m]),bGammaHm)
* gamma_m_New[m]=c_exp(gamma_m_New_log)
* dice[m] = c_log(random.uniform(0,1)) # <<<<<<<<<<<<<<
*
* with nogil:
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_random); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_uniform); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_12 = __pyx_PyFloat_AsDouble(__pyx_t_5); if (unlikely((__pyx_t_12 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_16 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_dice.data + __pyx_t_16 * __pyx_v_dice.strides[0]) )) = log(__pyx_t_12);
}
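/* Note: this GIL-holding loop draws the proposals: gamma_m_New[m] is a
   log-scale random walk, exp(Normal(log(gammaHm[m]), bGammaHm)), and
   dice[m] = log(U(0,1)) is pre-drawn here, presumably because the
   accept test below runs inside a nogil prange where Python-level
   random calls are unavailable. */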
/* "estimate_gamma_m.pyx":65
* dice[m] = c_log(random.uniform(0,1))
*
* with nogil: # <<<<<<<<<<<<<<
* for m in prange(M):
* if gamma_m_New[m] < 1e6 :
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
/* "estimate_gamma_m.pyx":66
*
* with nogil:
* for m in prange(M): # <<<<<<<<<<<<<<
* if gamma_m_New[m] < 1e6 :
* for i in range(n):
*/
__pyx_t_8 = __pyx_v_M;
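/* Note: the `if (1 == 0) abort();` below appears to be Cython's
   zero-step guard for prange, constant-folded to dead code for the
   literal step of 1; the trip-count formula that follows reduces to M
   for the same reason. */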
if (1 == 0) abort();
{
double __pyx_parallel_temp0 = ((double)__PYX_NAN());
int __pyx_parallel_temp1 = ((int)0xbad0bad0);
int __pyx_parallel_temp2 = ((int)0xbad0bad0);
int __pyx_parallel_temp3 = ((int)0xbad0bad0);
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_11 = (__pyx_t_8 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_11 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_12, __pyx_t_13, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_44, __pyx_t_45, __pyx_t_46, __pyx_t_47, __pyx_t_48, __pyx_t_49, __pyx_t_50, __pyx_t_51, __pyx_t_52, __pyx_t_53, __pyx_t_54, __pyx_t_55, __pyx_t_56, __pyx_t_57, __pyx_t_58, __pyx_t_59, __pyx_t_60, __pyx_t_61, __pyx_t_62, __pyx_t_63, __pyx_t_64, __pyx_t_65, __pyx_t_66, __pyx_t_67, __pyx_t_68, __pyx_t_69, __pyx_t_70, __pyx_t_71, __pyx_t_72, __pyx_t_73, __pyx_t_74) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_cov) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) firstprivate(__pyx_v_m) lastprivate(__pyx_v_m)
#endif /* _OPENMP */
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_11; __pyx_t_10++){
if (__pyx_parallel_why < 2)
{
__pyx_v_m = (int)(0 + 1 * __pyx_t_10);
/* Initialize private variables to invalid values */
__pyx_v_cov = ((double)__PYX_NAN());
__pyx_v_i = ((int)0xbad0bad0);
__pyx_v_j = ((int)0xbad0bad0);
/* "estimate_gamma_m.pyx":67
* with nogil:
* for m in prange(M):
* if gamma_m_New[m] < 1e6 : # <<<<<<<<<<<<<<
* for i in range(n):
* for j in range(n):
*/
__pyx_t_17 = __pyx_v_m;
__pyx_t_18 = (((*((double *) ( /* dim=0 */ (__pyx_v_gamma_m_New.data + __pyx_t_17 * __pyx_v_gamma_m_New.strides[0]) ))) < 1e6) != 0);
if (__pyx_t_18) {
/* "estimate_gamma_m.pyx":68
* for m in prange(M):
* if gamma_m_New[m] < 1e6 :
* for i in range(n): # <<<<<<<<<<<<<<
* for j in range(n):
* cov = c_exp(-dist[i,j]/gamma_m_New[m]);
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":69
* if gamma_m_New[m] < 1e6 :
* for i in range(n):
* for j in range(n): # <<<<<<<<<<<<<<
* cov = c_exp(-dist[i,j]/gamma_m_New[m]);
* covMat_m_New[m*nsq+i*n+j] = cov
*/
__pyx_t_21 = __pyx_v_n;
__pyx_t_22 = __pyx_t_21;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "estimate_gamma_m.pyx":70
* for i in range(n):
* for j in range(n):
* cov = c_exp(-dist[i,j]/gamma_m_New[m]); # <<<<<<<<<<<<<<
* covMat_m_New[m*nsq+i*n+j] = cov
* covMat_m_New_save[m*nsq+i*n+j] = cov
*/
__pyx_t_24 = __pyx_v_i;
__pyx_t_25 = __pyx_v_j;
__pyx_t_12 = (-(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_dist.data + __pyx_t_24 * __pyx_v_dist.strides[0]) ) + __pyx_t_25 * __pyx_v_dist.strides[1]) ))));
__pyx_t_26 = __pyx_v_m;
__pyx_t_27 = (*((double *) ( /* dim=0 */ (__pyx_v_gamma_m_New.data + __pyx_t_26 * __pyx_v_gamma_m_New.strides[0]) )));
if (unlikely(__pyx_t_27 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 70, __pyx_L10_error)
}
__pyx_v_cov = exp((__pyx_t_12 / __pyx_t_27));
/* "estimate_gamma_m.pyx":71
* for j in range(n):
* cov = c_exp(-dist[i,j]/gamma_m_New[m]);
* covMat_m_New[m*nsq+i*n+j] = cov # <<<<<<<<<<<<<<
* covMat_m_New_save[m*nsq+i*n+j] = cov
*
*/
__pyx_t_28 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_28 * __pyx_v_covMat_m_New.strides[0]) )) = __pyx_v_cov;
/* "estimate_gamma_m.pyx":72
* cov = c_exp(-dist[i,j]/gamma_m_New[m]);
* covMat_m_New[m*nsq+i*n+j] = cov
* covMat_m_New_save[m*nsq+i*n+j] = cov # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_29 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New_save.data + __pyx_t_29 * __pyx_v_covMat_m_New_save.strides[0]) )) = __pyx_v_cov;
}
}
/* "estimate_gamma_m.pyx":75
*
*
* dpotrf('L',&n,&covMat_m_New[m*nsq],&n,&info); # <<<<<<<<<<<<<<
*
* for i in range(n):
*/
__pyx_t_30 = (__pyx_v_m * __pyx_v_nsq);
__pyx_f_5scipy_6linalg_13cython_lapack_dpotrf(((char *)"L"), (&__pyx_v_n), (&(*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_30 * __pyx_v_covMat_m_New.strides[0]) )))), (&__pyx_v_n), (&__pyx_v_info));
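/* Note: dpotrf (via scipy.linalg.cython_lapack) Cholesky-factorizes
   the m-th n*n block of covMat_m_New in place using the 'L' triangle.
   The row-major buffer is handed straight to the Fortran routine;
   since the matrix is symmetric, the implicit row-/column-major
   transpose is harmless. `info` is not checked afterwards. */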
/* "estimate_gamma_m.pyx":77
* dpotrf('L',&n,&covMat_m_New[m*nsq],&n,&info);
*
* for i in range(n): # <<<<<<<<<<<<<<
* logProbPart1[m] += c_log(covMat_m_New[m*nsq+i*n+i]);
* logProbPart1[m] *= -1 * Rm[m];
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":78
*
* for i in range(n):
* logProbPart1[m] += c_log(covMat_m_New[m*nsq+i*n+i]); # <<<<<<<<<<<<<<
* logProbPart1[m] *= -1 * Rm[m];
*
*/
__pyx_t_31 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_i);
__pyx_t_32 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProbPart1.data + __pyx_t_32 * __pyx_v_logProbPart1.strides[0]) )) += log((*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_31 * __pyx_v_covMat_m_New.strides[0]) ))));
}
/* "estimate_gamma_m.pyx":79
* for i in range(n):
* logProbPart1[m] += c_log(covMat_m_New[m*nsq+i*n+i]);
* logProbPart1[m] *= -1 * Rm[m]; # <<<<<<<<<<<<<<
*
* dpotrs('L',&n,&Rm[m],&covMat_m_New[m*nsq],&n,&XHmrDiff[m*offset],&n,&info);
*/
__pyx_t_33 = __pyx_v_m;
__pyx_t_34 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProbPart1.data + __pyx_t_34 * __pyx_v_logProbPart1.strides[0]) )) *= (-1L * (*((int *) ( /* dim=0 */ (__pyx_v_Rm.data + __pyx_t_33 * __pyx_v_Rm.strides[0]) ))));
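/* Note: summing the log of the Cholesky diagonal gives half the
   log-determinant of the covariance block; scaling by -Rm[m] yields a
   -(Rm[m]/2)*logdet term, consistent with the normalizer of Rm[m]
   replicated Gaussian likelihoods. */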
/* "estimate_gamma_m.pyx":81
* logProbPart1[m] *= -1 * Rm[m];
*
* dpotrs('L',&n,&Rm[m],&covMat_m_New[m*nsq],&n,&XHmrDiff[m*offset],&n,&info); # <<<<<<<<<<<<<<
*
* for i in range(Rm[m]*n):
*/
__pyx_t_35 = __pyx_v_m;
__pyx_t_36 = (__pyx_v_m * __pyx_v_nsq);
__pyx_t_37 = (__pyx_v_m * __pyx_v_offset);
__pyx_f_5scipy_6linalg_13cython_lapack_dpotrs(((char *)"L"), (&__pyx_v_n), (&(*((int *) ( /* dim=0 */ (__pyx_v_Rm.data + __pyx_t_35 * __pyx_v_Rm.strides[0]) )))), (&(*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_36 * __pyx_v_covMat_m_New.strides[0]) )))), (&__pyx_v_n), (&(*((double *) ( /* dim=0 */ (__pyx_v_XHmrDiff.data + __pyx_t_37 * __pyx_v_XHmrDiff.strides[0]) )))), (&__pyx_v_n), (&__pyx_v_info));
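/* Note: dpotrs reuses the factor to solve the covariance system for
   Rm[m] right-hand sides in place, turning the XHmrDiff slice into
   (covariance)^-1 times the original residuals. */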
/* "estimate_gamma_m.pyx":83
* dpotrs('L',&n,&Rm[m],&covMat_m_New[m*nsq],&n,&XHmrDiff[m*offset],&n,&info);
*
* for i in range(Rm[m]*n): # <<<<<<<<<<<<<<
* logProb[m] += XHmrDiff[m*offset+i]*XHmrDiff_original[m*offset+i]
*
*/
__pyx_t_38 = __pyx_v_m;
__pyx_t_13 = ((*((int *) ( /* dim=0 */ (__pyx_v_Rm.data + __pyx_t_38 * __pyx_v_Rm.strides[0]) ))) * __pyx_v_n);
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":84
*
* for i in range(Rm[m]*n):
* logProb[m] += XHmrDiff[m*offset+i]*XHmrDiff_original[m*offset+i] # <<<<<<<<<<<<<<
*
* logProb[m] = logProbPart1[m] + logProb[m]*phiHm[m]*(-0.5)
*/
__pyx_t_39 = ((__pyx_v_m * __pyx_v_offset) + __pyx_v_i);
__pyx_t_40 = ((__pyx_v_m * __pyx_v_offset) + __pyx_v_i);
__pyx_t_41 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProb.data + __pyx_t_41 * __pyx_v_logProb.strides[0]) )) += ((*((double *) ( /* dim=0 */ (__pyx_v_XHmrDiff.data + __pyx_t_39 * __pyx_v_XHmrDiff.strides[0]) ))) * (*((double *) ( /* dim=0 */ (__pyx_v_XHmrDiff_original.data + __pyx_t_40 * __pyx_v_XHmrDiff_original.strides[0]) ))));
}
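/* Note: dotting the solved slice against the untouched copy
   XHmrDiff_original accumulates the quadratic form x' * Sigma^-1 * x
   over all Rm[m]*n entries; pyx line 86 below combines it with the
   log-det term, scaled by -phiHm[m]/2. */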
/* "estimate_gamma_m.pyx":86
* logProb[m] += XHmrDiff[m*offset+i]*XHmrDiff_original[m*offset+i]
*
* logProb[m] = logProbPart1[m] + logProb[m]*phiHm[m]*(-0.5) # <<<<<<<<<<<<<<
*
* logProbOld[m] = logGammaHmProbPart1[m] + rateHm[m] * phiHm[m] + c_log(gammaHm[m]);
*/
__pyx_t_42 = __pyx_v_m;
__pyx_t_43 = __pyx_v_m;
__pyx_t_44 = __pyx_v_m;
__pyx_t_45 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProb.data + __pyx_t_45 * __pyx_v_logProb.strides[0]) )) = ((*((double *) ( /* dim=0 */ (__pyx_v_logProbPart1.data + __pyx_t_42 * __pyx_v_logProbPart1.strides[0]) ))) + (((*((double *) ( /* dim=0 */ (__pyx_v_logProb.data + __pyx_t_43 * __pyx_v_logProb.strides[0]) ))) * (*((double *) ( /* dim=0 */ (__pyx_v_phiHm.data + __pyx_t_44 * __pyx_v_phiHm.strides[0]) )))) * -0.5));
/* "estimate_gamma_m.pyx":88
* logProb[m] = logProbPart1[m] + logProb[m]*phiHm[m]*(-0.5)
*
* logProbOld[m] = logGammaHmProbPart1[m] + rateHm[m] * phiHm[m] + c_log(gammaHm[m]); # <<<<<<<<<<<<<<
*
* if dice[m] < (logProb[m]-logProbOld[m]):
*/
__pyx_t_46 = __pyx_v_m;
__pyx_t_47 = __pyx_v_m;
__pyx_t_48 = __pyx_v_m;
__pyx_t_49 = __pyx_v_m;
__pyx_t_50 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProbOld.data + __pyx_t_50 * __pyx_v_logProbOld.strides[0]) )) = (((*((double *) ( /* dim=0 */ (__pyx_v_logGammaHmProbPart1.data + __pyx_t_46 * __pyx_v_logGammaHmProbPart1.strides[0]) ))) + ((*((double *) ( /* dim=0 */ (__pyx_v_rateHm.data + __pyx_t_47 * __pyx_v_rateHm.strides[0]) ))) * (*((double *) ( /* dim=0 */ (__pyx_v_phiHm.data + __pyx_t_48 * __pyx_v_phiHm.strides[0]) ))))) + log((*((double *) ( /* dim=0 */ (__pyx_v_gammaHm.data + __pyx_t_49 * __pyx_v_gammaHm.strides[0]) )))));
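/* Note: logProbOld is rebuilt from the cached log-det part plus
   rateHm*phiHm and a log(gammaHm[m]) term (plausibly the Jacobian of
   the log-scale proposal); the comparison against dice[m] = log U
   below is the Metropolis-Hastings accept test in log space. */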
/* "estimate_gamma_m.pyx":90
* logProbOld[m] = logGammaHmProbPart1[m] + rateHm[m] * phiHm[m] + c_log(gammaHm[m]);
*
* if dice[m] < (logProb[m]-logProbOld[m]): # <<<<<<<<<<<<<<
* gammaHm[m] = gamma_m_New[m]
*
*/
__pyx_t_51 = __pyx_v_m;
__pyx_t_52 = __pyx_v_m;
__pyx_t_53 = __pyx_v_m;
__pyx_t_18 = (((*((double *) ( /* dim=0 */ (__pyx_v_dice.data + __pyx_t_51 * __pyx_v_dice.strides[0]) ))) < ((*((double *) ( /* dim=0 */ (__pyx_v_logProb.data + __pyx_t_52 * __pyx_v_logProb.strides[0]) ))) - (*((double *) ( /* dim=0 */ (__pyx_v_logProbOld.data + __pyx_t_53 * __pyx_v_logProbOld.strides[0]) ))))) != 0);
if (__pyx_t_18) {
/* "estimate_gamma_m.pyx":91
*
* if dice[m] < (logProb[m]-logProbOld[m]):
* gammaHm[m] = gamma_m_New[m] # <<<<<<<<<<<<<<
*
* for i in range(n):
*/
__pyx_t_54 = __pyx_v_m;
__pyx_t_55 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_gammaHm.data + __pyx_t_55 * __pyx_v_gammaHm.strides[0]) )) = (*((double *) ( /* dim=0 */ (__pyx_v_gamma_m_New.data + __pyx_t_54 * __pyx_v_gamma_m_New.strides[0]) )));
/* "estimate_gamma_m.pyx":93
* gammaHm[m] = gamma_m_New[m]
*
* for i in range(n): # <<<<<<<<<<<<<<
* for j in range(n):
* covMatHm[m,i,j] = covMat_m_New_save[m*nsq+i*n+j]
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":94
*
* for i in range(n):
* for j in range(n): # <<<<<<<<<<<<<<
* covMatHm[m,i,j] = covMat_m_New_save[m*nsq+i*n+j]
*
*/
__pyx_t_21 = __pyx_v_n;
__pyx_t_22 = __pyx_t_21;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "estimate_gamma_m.pyx":95
* for i in range(n):
* for j in range(n):
* covMatHm[m,i,j] = covMat_m_New_save[m*nsq+i*n+j] # <<<<<<<<<<<<<<
*
* dpotri('L',&n,&covMat_m_New[m*nsq],&n,&info)
*/
__pyx_t_56 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_j);
__pyx_t_57 = __pyx_v_m;
__pyx_t_58 = __pyx_v_i;
__pyx_t_59 = __pyx_v_j;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_covMatHm.data + __pyx_t_57 * __pyx_v_covMatHm.strides[0]) ) + __pyx_t_58 * __pyx_v_covMatHm.strides[1]) ) + __pyx_t_59 * __pyx_v_covMatHm.strides[2]) )) = (*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New_save.data + __pyx_t_56 * __pyx_v_covMat_m_New_save.strides[0]) )));
}
}
/* "estimate_gamma_m.pyx":97
* covMatHm[m,i,j] = covMat_m_New_save[m*nsq+i*n+j]
*
* dpotri('L',&n,&covMat_m_New[m*nsq],&n,&info) # <<<<<<<<<<<<<<
* for i in range(n):
* for j in range(i,n):
*/
__pyx_t_60 = (__pyx_v_m * __pyx_v_nsq);
__pyx_f_5scipy_6linalg_13cython_lapack_dpotri(((char *)"L"), (&__pyx_v_n), (&(*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_60 * __pyx_v_covMat_m_New.strides[0]) )))), (&__pyx_v_n), (&__pyx_v_info));
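/* Note: on acceptance, dpotri inverts the block in place from its
   Cholesky factor, filling only one triangle; the loops below copy
   that triangle into invCovMatHm and mirror it across the diagonal to
   restore the full symmetric inverse. */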
/* "estimate_gamma_m.pyx":98
*
* dpotri('L',&n,&covMat_m_New[m*nsq],&n,&info)
* for i in range(n): # <<<<<<<<<<<<<<
* for j in range(i,n):
* invCovMatHm[m,i,j] = covMat_m_New[m*nsq+i*n+j]
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":99
* dpotri('L',&n,&covMat_m_New[m*nsq],&n,&info)
* for i in range(n):
* for j in range(i,n): # <<<<<<<<<<<<<<
* invCovMatHm[m,i,j] = covMat_m_New[m*nsq+i*n+j]
* for j in range(i):
*/
__pyx_t_21 = __pyx_v_n;
__pyx_t_22 = __pyx_t_21;
for (__pyx_t_23 = __pyx_v_i; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "estimate_gamma_m.pyx":100
* for i in range(n):
* for j in range(i,n):
* invCovMatHm[m,i,j] = covMat_m_New[m*nsq+i*n+j] # <<<<<<<<<<<<<<
* for j in range(i):
* invCovMatHm[m,i,j] = invCovMatHm[m,j,i]
*/
__pyx_t_61 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_j);
__pyx_t_62 = __pyx_v_m;
__pyx_t_63 = __pyx_v_i;
__pyx_t_64 = __pyx_v_j;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_invCovMatHm.data + __pyx_t_62 * __pyx_v_invCovMatHm.strides[0]) ) + __pyx_t_63 * __pyx_v_invCovMatHm.strides[1]) ) + __pyx_t_64 * __pyx_v_invCovMatHm.strides[2]) )) = (*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_61 * __pyx_v_covMat_m_New.strides[0]) )));
}
/* "estimate_gamma_m.pyx":101
* for j in range(i,n):
* invCovMatHm[m,i,j] = covMat_m_New[m*nsq+i*n+j]
* for j in range(i): # <<<<<<<<<<<<<<
* invCovMatHm[m,i,j] = invCovMatHm[m,j,i]
*
*/
__pyx_t_21 = __pyx_v_i;
__pyx_t_22 = __pyx_t_21;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "estimate_gamma_m.pyx":102
* invCovMatHm[m,i,j] = covMat_m_New[m*nsq+i*n+j]
* for j in range(i):
* invCovMatHm[m,i,j] = invCovMatHm[m,j,i] # <<<<<<<<<<<<<<
*
* logGammaHmProbPart1[m] = logProbPart1[m]
*/
__pyx_t_65 = __pyx_v_m;
__pyx_t_66 = __pyx_v_j;
__pyx_t_67 = __pyx_v_i;
__pyx_t_68 = __pyx_v_m;
__pyx_t_69 = __pyx_v_i;
__pyx_t_70 = __pyx_v_j;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_invCovMatHm.data + __pyx_t_68 * __pyx_v_invCovMatHm.strides[0]) ) + __pyx_t_69 * __pyx_v_invCovMatHm.strides[1]) ) + __pyx_t_70 * __pyx_v_invCovMatHm.strides[2]) )) = (*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_invCovMatHm.data + __pyx_t_65 * __pyx_v_invCovMatHm.strides[0]) ) + __pyx_t_66 * __pyx_v_invCovMatHm.strides[1]) ) + __pyx_t_67 * __pyx_v_invCovMatHm.strides[2]) )));
}
}
/* "estimate_gamma_m.pyx":104
* invCovMatHm[m,i,j] = invCovMatHm[m,j,i]
*
* logGammaHmProbPart1[m] = logProbPart1[m] # <<<<<<<<<<<<<<
* acceptGammaHm[m] = acceptGammaHm[m] + 1
*
*/
__pyx_t_71 = __pyx_v_m;
__pyx_t_72 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logGammaHmProbPart1.data + __pyx_t_72 * __pyx_v_logGammaHmProbPart1.strides[0]) )) = (*((double *) ( /* dim=0 */ (__pyx_v_logProbPart1.data + __pyx_t_71 * __pyx_v_logProbPart1.strides[0]) )));
/* "estimate_gamma_m.pyx":105
*
* logGammaHmProbPart1[m] = logProbPart1[m]
* acceptGammaHm[m] = acceptGammaHm[m] + 1 # <<<<<<<<<<<<<<
*
* # Update gammaFm
*/
__pyx_t_73 = __pyx_v_m;
__pyx_t_74 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_acceptGammaHm.data + __pyx_t_74 * __pyx_v_acceptGammaHm.strides[0]) )) = ((*((double *) ( /* dim=0 */ (__pyx_v_acceptGammaHm.data + __pyx_t_73 * __pyx_v_acceptGammaHm.strides[0]) ))) + 1.0);
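/* Note: acceptance bookkeeping: the freshly computed log-det part is
   cached in logGammaHmProbPart1 for the next sweep and the per-m
   acceptance counter is bumped. */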
/* "estimate_gamma_m.pyx":90
* logProbOld[m] = logGammaHmProbPart1[m] + rateHm[m] * phiHm[m] + c_log(gammaHm[m]);
*
* if dice[m] < (logProb[m]-logProbOld[m]): # <<<<<<<<<<<<<<
* gammaHm[m] = gamma_m_New[m]
*
*/
}
/* "estimate_gamma_m.pyx":67
* with nogil:
* for m in prange(M):
* if gamma_m_New[m] < 1e6 : # <<<<<<<<<<<<<<
* for i in range(n):
* for j in range(n):
*/
}
goto __pyx_L33;
__pyx_L10_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L32;
__pyx_L32:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_cov;
__pyx_parallel_temp1 = __pyx_v_i;
__pyx_parallel_temp2 = __pyx_v_j;
__pyx_parallel_temp3 = __pyx_v_m;
}
__pyx_L33:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_cov = __pyx_parallel_temp0;
__pyx_v_i = __pyx_parallel_temp1;
__pyx_v_j = __pyx_parallel_temp2;
__pyx_v_m = __pyx_parallel_temp3;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L6_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "estimate_gamma_m.pyx":65
* dice[m] = c_log(random.uniform(0,1))
*
* with nogil: # <<<<<<<<<<<<<<
* for m in prange(M):
* if gamma_m_New[m] < 1e6 :
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L7;
}
__pyx_L6_error: {
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L7:;
}
}
/* "estimate_gamma_m.pyx":108
*
* # Update gammaFm
* for m in range(M): # <<<<<<<<<<<<<<
* gamma_m_New_log=random.normalvariate(c_log(state.gammaFm[m]),bGammaFm)
* gamma_m_New[m]=c_exp(gamma_m_New_log)
*/
__pyx_t_11 = __pyx_v_M;
__pyx_t_10 = __pyx_t_11;
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_10; __pyx_t_8+=1) {
__pyx_v_m = __pyx_t_8;
/* "estimate_gamma_m.pyx":109
* # Update gammaFm
* for m in range(M):
* gamma_m_New_log=random.normalvariate(c_log(state.gammaFm[m]),bGammaFm) # <<<<<<<<<<<<<<
* gamma_m_New[m]=c_exp(gamma_m_New_log)
* dice[m] = c_log(random.uniform(0,1))
*/
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_random); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_normalvariate); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_state); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_gammaFm); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, __pyx_v_m, int, 1, __Pyx_PyInt_From_int, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_27 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_27 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyFloat_FromDouble(log(__pyx_t_27)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = NULL;
__pyx_t_13 = 0;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_14))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_14);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_14);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_14, function);
__pyx_t_13 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_14)) {
PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_t_1, __pyx_v_bGammaFm};
__pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_14, __pyx_temp+1-__pyx_t_13, 2+__pyx_t_13); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_14)) {
PyObject *__pyx_temp[3] = {__pyx_t_2, __pyx_t_1, __pyx_v_bGammaFm};
__pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_14, __pyx_temp+1-__pyx_t_13, 2+__pyx_t_13); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_13); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_2) {
__Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_2); __pyx_t_2 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_13, __pyx_t_1);
__Pyx_INCREF(__pyx_v_bGammaFm);
__Pyx_GIVEREF(__pyx_v_bGammaFm);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_13, __pyx_v_bGammaFm);
__pyx_t_1 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 109, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__Pyx_XDECREF_SET(__pyx_v_gamma_m_New_log, __pyx_t_5);
__pyx_t_5 = 0;
/* "estimate_gamma_m.pyx":110
* for m in range(M):
* gamma_m_New_log=random.normalvariate(c_log(state.gammaFm[m]),bGammaFm)
* gamma_m_New[m]=c_exp(gamma_m_New_log) # <<<<<<<<<<<<<<
* dice[m] = c_log(random.uniform(0,1))
*
*/
__pyx_t_27 = __pyx_PyFloat_AsDouble(__pyx_v_gamma_m_New_log); if (unlikely((__pyx_t_27 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 110, __pyx_L1_error)
__pyx_t_75 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_gamma_m_New.data + __pyx_t_75 * __pyx_v_gamma_m_New.strides[0]) )) = exp(__pyx_t_27);
/* "estimate_gamma_m.pyx":111
* gamma_m_New_log=random.normalvariate(c_log(state.gammaFm[m]),bGammaFm)
* gamma_m_New[m]=c_exp(gamma_m_New_log)
* dice[m] = c_log(random.uniform(0,1)) # <<<<<<<<<<<<<<
*
* Rm = state.RFm;
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_random); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_uniform); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_14, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__pyx_t_27 = __pyx_PyFloat_AsDouble(__pyx_t_5); if (unlikely((__pyx_t_27 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_76 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_dice.data + __pyx_t_76 * __pyx_v_dice.strides[0]) )) = log(__pyx_t_27);
}
/* "estimate_gamma_m.pyx":113
* dice[m] = c_log(random.uniform(0,1))
*
* Rm = state.RFm; # <<<<<<<<<<<<<<
* logProbPart1 = np.zeros(M)
* logProb = np.zeros(M)
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_state); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_RFm); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_4 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_t_14, PyBUF_WRITABLE); if (unlikely(!__pyx_t_4.memview)) __PYX_ERR(0, 113, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__PYX_XDEC_MEMVIEW(&__pyx_v_Rm, 1);
__pyx_v_Rm = __pyx_t_4;
__pyx_t_4.memview = NULL;
__pyx_t_4.data = NULL;
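/* Rm caches state.RFm as a writable int memoryview so the nogil region
 * below can read the per-component counts without touching Python
 * objects. */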
/* "estimate_gamma_m.pyx":114
*
* Rm = state.RFm;
* logProbPart1 = np.zeros(M) # <<<<<<<<<<<<<<
* logProb = np.zeros(M)
*
*/
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_9);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_9, function);
}
}
__pyx_t_14 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_1, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_5);
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_14, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 114, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__PYX_XDEC_MEMVIEW(&__pyx_v_logProbPart1, 1);
__pyx_v_logProbPart1 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "estimate_gamma_m.pyx":115
* Rm = state.RFm;
* logProbPart1 = np.zeros(M)
* logProb = np.zeros(M) # <<<<<<<<<<<<<<
*
* with nogil:
*/
__Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_1 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_1)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_1);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_14 = (__pyx_t_1) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_1, __pyx_t_9) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_9);
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_14);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_14, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
__PYX_XDEC_MEMVIEW(&__pyx_v_logProb, 1);
__pyx_v_logProb = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
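/* logProbPart1 and logProb are zero-initialised accumulators with one
 * slot per component m; both are filled inside the prange below. */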
/* "estimate_gamma_m.pyx":117
* logProb = np.zeros(M)
*
* with nogil: # <<<<<<<<<<<<<<
* for m in prange(M):
* if gamma_m_New[m] < 1e6 :
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
/* "estimate_gamma_m.pyx":118
*
* with nogil:
* for m in prange(M): # <<<<<<<<<<<<<<
* if gamma_m_New[m] < 1e6 :
* for i in range(n):
*/
__pyx_t_11 = __pyx_v_M;
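/* Vestigial step check emitted by Cython for prange: with the literal
 * step 1 the condition folds to false and the abort() is dead code. */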
if (1 == 0) abort();
{
double __pyx_parallel_temp0 = ((double)__PYX_NAN());
int __pyx_parallel_temp1 = ((int)0xbad0bad0);
int __pyx_parallel_temp2 = ((int)0xbad0bad0);
int __pyx_parallel_temp3 = ((int)0xbad0bad0);
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
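/* Iteration count for the parallel loop: the generic Cython formula
 * ceil((stop - start) / step) with start, stop, step specialised to
 * 0, M, 1. */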
__pyx_t_8 = (__pyx_t_11 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_8 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_100, __pyx_t_101, __pyx_t_102, __pyx_t_103, __pyx_t_104, __pyx_t_105, __pyx_t_106, __pyx_t_107, __pyx_t_108, __pyx_t_109, __pyx_t_110, __pyx_t_111, __pyx_t_112, __pyx_t_113, __pyx_t_114, __pyx_t_115, __pyx_t_116, __pyx_t_117, __pyx_t_118, __pyx_t_119, __pyx_t_12, __pyx_t_120, __pyx_t_121, __pyx_t_122, __pyx_t_123, __pyx_t_124, __pyx_t_125, __pyx_t_126, __pyx_t_127, __pyx_t_13, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_27, __pyx_t_77, __pyx_t_78, __pyx_t_79, __pyx_t_80, __pyx_t_81, __pyx_t_82, __pyx_t_83, __pyx_t_84, __pyx_t_85, __pyx_t_86, __pyx_t_87, __pyx_t_88, __pyx_t_89, __pyx_t_90, __pyx_t_91, __pyx_t_92, __pyx_t_93, __pyx_t_94, __pyx_t_95, __pyx_t_96, __pyx_t_97, __pyx_t_98, __pyx_t_99) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_cov) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) firstprivate(__pyx_v_m) lastprivate(__pyx_v_m)
#endif /* _OPENMP */
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_8; __pyx_t_10++){
if (__pyx_parallel_why < 2)
{
__pyx_v_m = (int)(0 + 1 * __pyx_t_10);
/* Initialize private variables to invalid values */
__pyx_v_cov = ((double)__PYX_NAN());
__pyx_v_i = ((int)0xbad0bad0);
__pyx_v_j = ((int)0xbad0bad0);
/* "estimate_gamma_m.pyx":119
* with nogil:
* for m in prange(M):
* if gamma_m_New[m] < 1e6 : # <<<<<<<<<<<<<<
* for i in range(n):
* for j in range(n):
*/
__pyx_t_77 = __pyx_v_m;
__pyx_t_18 = (((*((double *) ( /* dim=0 */ (__pyx_v_gamma_m_New.data + __pyx_t_77 * __pyx_v_gamma_m_New.strides[0]) ))) < 1e6) != 0);
if (__pyx_t_18) {
/* "estimate_gamma_m.pyx":120
* for m in prange(M):
* if gamma_m_New[m] < 1e6 :
* for i in range(n): # <<<<<<<<<<<<<<
* for j in range(n):
* cov = c_exp(-dist[i,j]/gamma_m_New[m]);
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":121
* if gamma_m_New[m] < 1e6 :
* for i in range(n):
* for j in range(n): # <<<<<<<<<<<<<<
* cov = c_exp(-dist[i,j]/gamma_m_New[m]);
* covMat_m_New[m*nsq+i*n+j] = cov
*/
__pyx_t_21 = __pyx_v_n;
__pyx_t_22 = __pyx_t_21;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "estimate_gamma_m.pyx":122
* for i in range(n):
* for j in range(n):
* cov = c_exp(-dist[i,j]/gamma_m_New[m]); # <<<<<<<<<<<<<<
* covMat_m_New[m*nsq+i*n+j] = cov
* covMat_m_New_save[m*nsq+i*n+j] = cov
*/
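/* Exponential covariance kernel, cov = exp(-dist[i,j] / gamma_m_New[m]);
 * the division is guarded below, re-acquiring the GIL just long enough
 * to raise ZeroDivisionError if the proposed gamma is exactly zero. */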
__pyx_t_78 = __pyx_v_i;
__pyx_t_79 = __pyx_v_j;
__pyx_t_27 = (-(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_dist.data + __pyx_t_78 * __pyx_v_dist.strides[0]) ) + __pyx_t_79 * __pyx_v_dist.strides[1]) ))));
__pyx_t_80 = __pyx_v_m;
__pyx_t_12 = (*((double *) ( /* dim=0 */ (__pyx_v_gamma_m_New.data + __pyx_t_80 * __pyx_v_gamma_m_New.strides[0]) )));
if (unlikely(__pyx_t_12 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 122, __pyx_L41_error)
}
__pyx_v_cov = exp((__pyx_t_27 / __pyx_t_12));
/* "estimate_gamma_m.pyx":123
* for j in range(n):
* cov = c_exp(-dist[i,j]/gamma_m_New[m]);
* covMat_m_New[m*nsq+i*n+j] = cov # <<<<<<<<<<<<<<
* covMat_m_New_save[m*nsq+i*n+j] = cov
*
*/
__pyx_t_81 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_81 * __pyx_v_covMat_m_New.strides[0]) )) = __pyx_v_cov;
/* "estimate_gamma_m.pyx":124
* cov = c_exp(-dist[i,j]/gamma_m_New[m]);
* covMat_m_New[m*nsq+i*n+j] = cov
* covMat_m_New_save[m*nsq+i*n+j] = cov # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_82 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New_save.data + __pyx_t_82 * __pyx_v_covMat_m_New_save.strides[0]) )) = __pyx_v_cov;
}
}
/* "estimate_gamma_m.pyx":127
*
*
* dpotrf('L',&n,&covMat_m_New[m*nsq],&n,&info); # <<<<<<<<<<<<<<
*
* for i in range(n):
*/
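/* In-place Cholesky factorisation of the m-th n-by-n covariance block
 * via LAPACK dpotrf.  The flat buffer is row-major while LAPACK reads
 * it column-major, which is harmless here because the kernel matrix is
 * symmetric.  Note that `info` is never checked, so a proposal that
 * makes the block numerically non-positive-definite passes silently. */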
__pyx_t_83 = (__pyx_v_m * __pyx_v_nsq);
__pyx_f_5scipy_6linalg_13cython_lapack_dpotrf(((char *)"L"), (&__pyx_v_n), (&(*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_83 * __pyx_v_covMat_m_New.strides[0]) )))), (&__pyx_v_n), (&__pyx_v_info));
/* "estimate_gamma_m.pyx":129
* dpotrf('L',&n,&covMat_m_New[m*nsq],&n,&info);
*
* for i in range(n): # <<<<<<<<<<<<<<
* logProbPart1[m] += c_log(covMat_m_New[m*nsq+i*n+i]);
* logProbPart1[m] *= -1 * Rm[m];
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":130
*
* for i in range(n):
* logProbPart1[m] += c_log(covMat_m_New[m*nsq+i*n+i]); # <<<<<<<<<<<<<<
* logProbPart1[m] *= -1 * Rm[m];
*
*/
__pyx_t_84 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_i);
__pyx_t_85 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProbPart1.data + __pyx_t_85 * __pyx_v_logProbPart1.strides[0]) )) += log((*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_84 * __pyx_v_covMat_m_New.strides[0]) ))));
}
/* "estimate_gamma_m.pyx":131
* for i in range(n):
* logProbPart1[m] += c_log(covMat_m_New[m*nsq+i*n+i]);
* logProbPart1[m] *= -1 * Rm[m]; # <<<<<<<<<<<<<<
*
* dpotrs('L',&n,&Rm[m],&covMat_m_New[m*nsq],&n,&XFmrDiff[m*offset],&n,&info);
*/
__pyx_t_86 = __pyx_v_m;
__pyx_t_87 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProbPart1.data + __pyx_t_87 * __pyx_v_logProbPart1.strides[0]) )) *= (-1L * (*((int *) ( /* dim=0 */ (__pyx_v_Rm.data + __pyx_t_86 * __pyx_v_Rm.strides[0]) ))));
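/* The two statements above accumulate sum_i log(L[i,i]), i.e. half the
 * log-determinant of the covariance block, and scale it by -Rm[m];
 * this is consistent with the normalisation term of Rm[m] independent
 * Gaussian vectors sharing that covariance (constants cancel in the
 * acceptance ratio). */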
/* "estimate_gamma_m.pyx":133
* logProbPart1[m] *= -1 * Rm[m];
*
* dpotrs('L',&n,&Rm[m],&covMat_m_New[m*nsq],&n,&XFmrDiff[m*offset],&n,&info); # <<<<<<<<<<<<<<
*
* for i in range(Rm[m]*n):
*/
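/* dpotrs reuses the Cholesky factor to solve C * X = B for the Rm[m]
 * stacked right-hand sides, overwriting XFmrDiff[m*offset ...] with
 * the solved columns. */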
__pyx_t_88 = __pyx_v_m;
__pyx_t_89 = (__pyx_v_m * __pyx_v_nsq);
__pyx_t_90 = (__pyx_v_m * __pyx_v_offset);
__pyx_f_5scipy_6linalg_13cython_lapack_dpotrs(((char *)"L"), (&__pyx_v_n), (&(*((int *) ( /* dim=0 */ (__pyx_v_Rm.data + __pyx_t_88 * __pyx_v_Rm.strides[0]) )))), (&(*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_89 * __pyx_v_covMat_m_New.strides[0]) )))), (&__pyx_v_n), (&(*((double *) ( /* dim=0 */ (__pyx_v_XFmrDiff.data + __pyx_t_90 * __pyx_v_XFmrDiff.strides[0]) )))), (&__pyx_v_n), (&__pyx_v_info));
/* "estimate_gamma_m.pyx":135
* dpotrs('L',&n,&Rm[m],&covMat_m_New[m*nsq],&n,&XFmrDiff[m*offset],&n,&info);
*
* for i in range(Rm[m]*n): # <<<<<<<<<<<<<<
* logProb[m] += XFmrDiff[m*offset+i]*XFmrDiff_original[m*offset+i]
*
*/
__pyx_t_91 = __pyx_v_m;
__pyx_t_13 = ((*((int *) ( /* dim=0 */ (__pyx_v_Rm.data + __pyx_t_91 * __pyx_v_Rm.strides[0]) ))) * __pyx_v_n);
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":136
*
* for i in range(Rm[m]*n):
* logProb[m] += XFmrDiff[m*offset+i]*XFmrDiff_original[m*offset+i] # <<<<<<<<<<<<<<
*
* logProb[m] = logProbPart1[m] + logProb[m]*phiFm[m]*(-0.5)
*/
__pyx_t_92 = ((__pyx_v_m * __pyx_v_offset) + __pyx_v_i);
__pyx_t_93 = ((__pyx_v_m * __pyx_v_offset) + __pyx_v_i);
__pyx_t_94 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProb.data + __pyx_t_94 * __pyx_v_logProb.strides[0]) )) += ((*((double *) ( /* dim=0 */ (__pyx_v_XFmrDiff.data + __pyx_t_92 * __pyx_v_XFmrDiff.strides[0]) ))) * (*((double *) ( /* dim=0 */ (__pyx_v_XFmrDiff_original.data + __pyx_t_93 * __pyx_v_XFmrDiff_original.strides[0]) ))));
}
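/* logProb[m] now holds the quadratic form x^T C^{-1} x, accumulated
 * entry-wise from the solved copy against the untouched original over
 * all Rm[m]*n elements. */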
/* "estimate_gamma_m.pyx":138
* logProb[m] += XFmrDiff[m*offset+i]*XFmrDiff_original[m*offset+i]
*
* logProb[m] = logProbPart1[m] + logProb[m]*phiFm[m]*(-0.5) # <<<<<<<<<<<<<<
*
* logProbOld[m] = logGammaFmProbPart1[m] + rateFm[m] * phiFm[m] + c_log(gammaFm[m]);
*/
__pyx_t_95 = __pyx_v_m;
__pyx_t_96 = __pyx_v_m;
__pyx_t_97 = __pyx_v_m;
__pyx_t_98 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProb.data + __pyx_t_98 * __pyx_v_logProb.strides[0]) )) = ((*((double *) ( /* dim=0 */ (__pyx_v_logProbPart1.data + __pyx_t_95 * __pyx_v_logProbPart1.strides[0]) ))) + (((*((double *) ( /* dim=0 */ (__pyx_v_logProb.data + __pyx_t_96 * __pyx_v_logProb.strides[0]) ))) * (*((double *) ( /* dim=0 */ (__pyx_v_phiFm.data + __pyx_t_97 * __pyx_v_phiFm.strides[0]) )))) * -0.5));
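/* Proposed log-density assembled from the two partials above:
 * -(Rm[m]/2) * log det(C)  -  (phiFm[m]/2) * x^T C^{-1} x. */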
/* "estimate_gamma_m.pyx":140
* logProb[m] = logProbPart1[m] + logProb[m]*phiFm[m]*(-0.5)
*
* logProbOld[m] = logGammaFmProbPart1[m] + rateFm[m] * phiFm[m] + c_log(gammaFm[m]); # <<<<<<<<<<<<<<
*
* if dice[m] < (logProb[m]-logProbOld[m]):
*/
__pyx_t_99 = __pyx_v_m;
__pyx_t_100 = __pyx_v_m;
__pyx_t_101 = __pyx_v_m;
__pyx_t_102 = __pyx_v_m;
__pyx_t_103 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logProbOld.data + __pyx_t_103 * __pyx_v_logProbOld.strides[0]) )) = (((*((double *) ( /* dim=0 */ (__pyx_v_logGammaFmProbPart1.data + __pyx_t_99 * __pyx_v_logGammaFmProbPart1.strides[0]) ))) + ((*((double *) ( /* dim=0 */ (__pyx_v_rateFm.data + __pyx_t_100 * __pyx_v_rateFm.strides[0]) ))) * (*((double *) ( /* dim=0 */ (__pyx_v_phiFm.data + __pyx_t_101 * __pyx_v_phiFm.strides[0]) ))))) + log((*((double *) ( /* dim=0 */ (__pyx_v_gammaFm.data + __pyx_t_102 * __pyx_v_gammaFm.strides[0]) )))));
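/* Log-density of the current state, apparently combining the cached
 * determinant term, a rate-times-precision prior term, and the
 * log(gammaFm[m]) Jacobian of the log-space proposal. */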
/* "estimate_gamma_m.pyx":142
* logProbOld[m] = logGammaFmProbPart1[m] + rateFm[m] * phiFm[m] + c_log(gammaFm[m]);
*
* if dice[m] < (logProb[m]-logProbOld[m]): # <<<<<<<<<<<<<<
* gammaFm[m] = gamma_m_New[m]
*
*/
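/* Metropolis acceptance test: the proposal is accepted when
 * log(U) < logProb[m] - logProbOld[m]. */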
__pyx_t_104 = __pyx_v_m;
__pyx_t_105 = __pyx_v_m;
__pyx_t_106 = __pyx_v_m;
__pyx_t_18 = (((*((double *) ( /* dim=0 */ (__pyx_v_dice.data + __pyx_t_104 * __pyx_v_dice.strides[0]) ))) < ((*((double *) ( /* dim=0 */ (__pyx_v_logProb.data + __pyx_t_105 * __pyx_v_logProb.strides[0]) ))) - (*((double *) ( /* dim=0 */ (__pyx_v_logProbOld.data + __pyx_t_106 * __pyx_v_logProbOld.strides[0]) ))))) != 0);
if (__pyx_t_18) {
/* "estimate_gamma_m.pyx":143
*
* if dice[m] < (logProb[m]-logProbOld[m]):
* gammaFm[m] = gamma_m_New[m] # <<<<<<<<<<<<<<
*
* for i in range(n):
*/
__pyx_t_107 = __pyx_v_m;
__pyx_t_108 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_gammaFm.data + __pyx_t_108 * __pyx_v_gammaFm.strides[0]) )) = (*((double *) ( /* dim=0 */ (__pyx_v_gamma_m_New.data + __pyx_t_107 * __pyx_v_gamma_m_New.strides[0]) )));
/* "estimate_gamma_m.pyx":145
* gammaFm[m] = gamma_m_New[m]
*
* for i in range(n): # <<<<<<<<<<<<<<
* for j in range(n):
* covMatFm[m,i,j] = covMat_m_New_save[m*nsq+i*n+j]
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":146
*
* for i in range(n):
* for j in range(n): # <<<<<<<<<<<<<<
* covMatFm[m,i,j] = covMat_m_New_save[m*nsq+i*n+j]
*
*/
__pyx_t_21 = __pyx_v_n;
__pyx_t_22 = __pyx_t_21;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "estimate_gamma_m.pyx":147
* for i in range(n):
* for j in range(n):
* covMatFm[m,i,j] = covMat_m_New_save[m*nsq+i*n+j] # <<<<<<<<<<<<<<
*
* dpotri('L',&n,&covMat_m_New[m*nsq],&n,&info)
*/
__pyx_t_109 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_j);
__pyx_t_110 = __pyx_v_m;
__pyx_t_111 = __pyx_v_i;
__pyx_t_112 = __pyx_v_j;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_covMatFm.data + __pyx_t_110 * __pyx_v_covMatFm.strides[0]) ) + __pyx_t_111 * __pyx_v_covMatFm.strides[1]) ) + __pyx_t_112 * __pyx_v_covMatFm.strides[2]) )) = (*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New_save.data + __pyx_t_109 * __pyx_v_covMat_m_New_save.strides[0]) )));
}
}
/* "estimate_gamma_m.pyx":149
* covMatFm[m,i,j] = covMat_m_New_save[m*nsq+i*n+j]
*
* dpotri('L',&n,&covMat_m_New[m*nsq],&n,&info) # <<<<<<<<<<<<<<
* for i in range(n):
* for j in range(i,n):
*/
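/* dpotri converts the Cholesky factor into the matrix inverse in
 * place, writing only the triangle selected by 'L'; the two nested
 * loops below copy that triangle into invCovMatFm and mirror it to
 * restore full symmetry. */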
__pyx_t_113 = (__pyx_v_m * __pyx_v_nsq);
__pyx_f_5scipy_6linalg_13cython_lapack_dpotri(((char *)"L"), (&__pyx_v_n), (&(*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_113 * __pyx_v_covMat_m_New.strides[0]) )))), (&__pyx_v_n), (&__pyx_v_info));
/* "estimate_gamma_m.pyx":150
*
* dpotri('L',&n,&covMat_m_New[m*nsq],&n,&info)
* for i in range(n): # <<<<<<<<<<<<<<
* for j in range(i,n):
* invCovMatFm[m,i,j] = covMat_m_New[m*nsq+i*n+j]
*/
__pyx_t_13 = __pyx_v_n;
__pyx_t_19 = __pyx_t_13;
for (__pyx_t_20 = 0; __pyx_t_20 < __pyx_t_19; __pyx_t_20+=1) {
__pyx_v_i = __pyx_t_20;
/* "estimate_gamma_m.pyx":151
* dpotri('L',&n,&covMat_m_New[m*nsq],&n,&info)
* for i in range(n):
* for j in range(i,n): # <<<<<<<<<<<<<<
* invCovMatFm[m,i,j] = covMat_m_New[m*nsq+i*n+j]
* for j in range(i):
*/
__pyx_t_21 = __pyx_v_n;
__pyx_t_22 = __pyx_t_21;
for (__pyx_t_23 = __pyx_v_i; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "estimate_gamma_m.pyx":152
* for i in range(n):
* for j in range(i,n):
* invCovMatFm[m,i,j] = covMat_m_New[m*nsq+i*n+j] # <<<<<<<<<<<<<<
* for j in range(i):
* invCovMatFm[m,i,j] = invCovMatFm[m,j,i]
*/
__pyx_t_114 = (((__pyx_v_m * __pyx_v_nsq) + (__pyx_v_i * __pyx_v_n)) + __pyx_v_j);
__pyx_t_115 = __pyx_v_m;
__pyx_t_116 = __pyx_v_i;
__pyx_t_117 = __pyx_v_j;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_invCovMatFm.data + __pyx_t_115 * __pyx_v_invCovMatFm.strides[0]) ) + __pyx_t_116 * __pyx_v_invCovMatFm.strides[1]) ) + __pyx_t_117 * __pyx_v_invCovMatFm.strides[2]) )) = (*((double *) ( /* dim=0 */ (__pyx_v_covMat_m_New.data + __pyx_t_114 * __pyx_v_covMat_m_New.strides[0]) )));
}
/* "estimate_gamma_m.pyx":153
* for j in range(i,n):
* invCovMatFm[m,i,j] = covMat_m_New[m*nsq+i*n+j]
* for j in range(i): # <<<<<<<<<<<<<<
* invCovMatFm[m,i,j] = invCovMatFm[m,j,i]
*
*/
__pyx_t_21 = __pyx_v_i;
__pyx_t_22 = __pyx_t_21;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "estimate_gamma_m.pyx":154
* invCovMatFm[m,i,j] = covMat_m_New[m*nsq+i*n+j]
* for j in range(i):
* invCovMatFm[m,i,j] = invCovMatFm[m,j,i] # <<<<<<<<<<<<<<
*
* logGammaFmProbPart1[m] = logProbPart1[m]
*/
__pyx_t_118 = __pyx_v_m;
__pyx_t_119 = __pyx_v_j;
__pyx_t_120 = __pyx_v_i;
__pyx_t_121 = __pyx_v_m;
__pyx_t_122 = __pyx_v_i;
__pyx_t_123 = __pyx_v_j;
*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_invCovMatFm.data + __pyx_t_121 * __pyx_v_invCovMatFm.strides[0]) ) + __pyx_t_122 * __pyx_v_invCovMatFm.strides[1]) ) + __pyx_t_123 * __pyx_v_invCovMatFm.strides[2]) )) = (*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_invCovMatFm.data + __pyx_t_118 * __pyx_v_invCovMatFm.strides[0]) ) + __pyx_t_119 * __pyx_v_invCovMatFm.strides[1]) ) + __pyx_t_120 * __pyx_v_invCovMatFm.strides[2]) )));
}
}
/* "estimate_gamma_m.pyx":156
* invCovMatFm[m,i,j] = invCovMatFm[m,j,i]
*
* logGammaFmProbPart1[m] = logProbPart1[m] # <<<<<<<<<<<<<<
* acceptGammaFm[m] = acceptGammaFm[m] + 1
*
*/
__pyx_t_124 = __pyx_v_m;
__pyx_t_125 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_logGammaFmProbPart1.data + __pyx_t_125 * __pyx_v_logGammaFmProbPart1.strides[0]) )) = (*((double *) ( /* dim=0 */ (__pyx_v_logProbPart1.data + __pyx_t_124 * __pyx_v_logProbPart1.strides[0]) )));
/* "estimate_gamma_m.pyx":157
*
* logGammaFmProbPart1[m] = logProbPart1[m]
* acceptGammaFm[m] = acceptGammaFm[m] + 1 # <<<<<<<<<<<<<<
*
* return()
*/
__pyx_t_126 = __pyx_v_m;
__pyx_t_127 = __pyx_v_m;
*((double *) ( /* dim=0 */ (__pyx_v_acceptGammaFm.data + __pyx_t_127 * __pyx_v_acceptGammaFm.strides[0]) )) = ((*((double *) ( /* dim=0 */ (__pyx_v_acceptGammaFm.data + __pyx_t_126 * __pyx_v_acceptGammaFm.strides[0]) ))) + 1.0);
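/* On acceptance the determinant term of the new state is cached for
 * the next sweep's logProbOld and the per-component acceptance counter
 * is bumped, presumably so the step size bGammaFm can be tuned. */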
/* "estimate_gamma_m.pyx":142
* logProbOld[m] = logGammaFmProbPart1[m] + rateFm[m] * phiFm[m] + c_log(gammaFm[m]);
*
* if dice[m] < (logProb[m]-logProbOld[m]): # <<<<<<<<<<<<<<
* gammaFm[m] = gamma_m_New[m]
*
*/
}
/* "estimate_gamma_m.pyx":119
* with nogil:
* for m in prange(M):
* if gamma_m_New[m] < 1e6 : # <<<<<<<<<<<<<<
* for i in range(n):
* for j in range(n):
*/
}
goto __pyx_L64;
__pyx_L41_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L63;
__pyx_L63:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates1)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_cov;
__pyx_parallel_temp1 = __pyx_v_i;
__pyx_parallel_temp2 = __pyx_v_j;
__pyx_parallel_temp3 = __pyx_v_m;
}
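/* The critical section above is how Cython emulates OpenMP lastprivate
 * semantics: loop privates are copied into shared temporaries, and the
 * surviving values are restored once the parallel region exits. */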
__pyx_L64:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_cov = __pyx_parallel_temp0;
__pyx_v_i = __pyx_parallel_temp1;
__pyx_v_j = __pyx_parallel_temp2;
__pyx_v_m = __pyx_parallel_temp3;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L37_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "estimate_gamma_m.pyx":117
* logProb = np.zeros(M)
*
* with nogil: # <<<<<<<<<<<<<<
* for m in prange(M):
* if gamma_m_New[m] < 1e6 :
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L38;
}
__pyx_L37_error: {
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L38:;
}
}
/* "estimate_gamma_m.pyx":159
* acceptGammaFm[m] = acceptGammaFm[m] + 1
*
* return() # <<<<<<<<<<<<<<
*/
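/* `return()` in the pyx source returns the empty tuple; Cython
 * satisfies it with the shared __pyx_empty_tuple singleton. */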
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
/* "estimate_gamma_m.pyx":19
*
*
* def estimate_gamma_m(bGammaHm=0.05,bGammaFm=0.05): # <<<<<<<<<<<<<<
*
* cdef double[:,:] dist = state.dist
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__PYX_XDEC_MEMVIEW(&__pyx_t_3, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_4, 1);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 1);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_14);
__Pyx_AddTraceback("estimate_gamma_m.estimate_gamma_m", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_dist, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_Rm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_XHmrDiff, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_XHmrDiff_original, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_XFmrDiff, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_XFmrDiff_original, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_logGammaHmProbPart1, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_covMatHm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_invCovMatHm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_acceptGammaHm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_rateHm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_phiHm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_gammaHm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_logGammaFmProbPart1, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_covMatFm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_invCovMatFm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_acceptGammaFm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_rateFm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_phiFm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_gammaFm, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_covMat_m_New, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_covMat_m_New_save, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_gamma_m_New, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_logProbPart1, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_logProb, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_logProbOld, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_dice, 1);
__Pyx_XDECREF(__pyx_v_gamma_m_New_log);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
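/* A single PyObject_Malloc of 2*ndim Py_ssize_t backs both arrays:
 * _shape occupies the first half and _strides aliases the second. */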
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
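/* Raw payload allocated with plain malloc(self.len); judging from
 * __dealloc__ further down, it is freed only when free_data is set and
 * no callback_free_data has been installed. */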
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
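/* For object-typed arrays every slot is seeded with Py_None, with a
 * matching incref per slot, so later reference-counted stores into the
 * buffer are safe. */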
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
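/* After __cinit__ validation self.mode can only be u"c" or u"fortran",
 * so one of the two branches above always sets bufmode before the
 * contiguity check below. */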
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
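/* The __getbuffer__ slot above implements the PEP 3118 buffer protocol for
* the array type: it validates the requested flags against the array's
* contiguity mode, then fills the Py_buffer fields (buf, len, ndim, shape,
* strides, itemsize, format) straight from the array's own bookkeeping.
* A minimal consumer-side sketch using only standard CPython buffer C-API
* calls -- the function name and double format are illustrative, not part
* of this module:
*
*   #include <Python.h>
*   static int sum_doubles(PyObject *obj, double *out) {
*       Py_buffer view;
*       if (PyObject_GetBuffer(obj, &view, PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) < 0)
*           return -1;                        /+ raises if not C-contiguous +/
*       /+ a real consumer would also check view.format is "d" +/
*       const double *data = (const double *)view.buf;
*       double total = 0.0;
*       for (Py_ssize_t i = 0; i < view.len / (Py_ssize_t)sizeof(double); i++)
*           total += data[i];
*       PyBuffer_Release(&view);              /+ must pair with GetBuffer +/
*       *out = total;
*       return 0;
*   }
*
* (Inner comments are written /+ +/ so this block remains one valid C comment.)
*/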
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
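/* Teardown order in array.__dealloc__ above: a user-installed
* callback_free_data wins; otherwise, if the array owns its buffer
* (free_data), object elements are first un-referenced via
* refcount_objects_in_slice() and the raw buffer is released with free().
* The _shape and _strides arrays live in one allocation, so the single
* PyObject_Free(self._shape) releases both. A hedged Cython-level sketch of
* handing a foreign buffer to an array with a custom deallocator (my_free
* is illustrative, not defined in this module):
*
*   cdef array a = array_cwrapper(shape, itemsize, b"d", b"c", <char *> buf)
*   a.callback_free_data = my_free   # called instead of free() on dealloc
*/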
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
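/* get_memview above backs the .memview property: it re-exposes the array
* through this module's memoryview type, requesting a writable,
* format-carrying view of any contiguity (PyBUF_ANY_CONTIGUOUS |
* PyBUF_FORMAT | PyBUF_WRITABLE). The tuple-building C-API calls are just
* memoryview(self, flags, self.dtype_is_object) spelled out. */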
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
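/* __getattr__, __getitem__ and __setitem__ above all follow one delegation
* pattern: the array implements no indexing of its own and forwards every
* attribute lookup and item access to self.memview, so indexing an array is
* equivalent to indexing the memoryview constructed in get_memview(). */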
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
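/* array_cwrapper (exported under the cname __pyx_array_new) has two
* branches: with buf == NULL it lets array.__cinit__ allocate storage;
* with a non-NULL buf it passes allocate_buffer=False and then adopts the
* caller's memory by assigning result.data = buf, so the lifetime of that
* buffer stays with the caller unless free_data or callback_free_data is
* set afterwards. */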
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
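/* Generated argument unpacking: accept exactly one argument, positionally
* or as the keyword `name`; any other call shape falls through to
* __Pyx_RaiseArgtupleInvalid below. */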
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
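/* Enum here is not a Python enum: it is a tiny named-sentinel class whose
* repr is its construction string. The module instantiates it a handful of
* times (e.g. generic = Enum("<strided and direct or indirect>")) and
* compares those singletons by identity when deciding how a memoryview
* axis is addressed (direct vs. indirect, strided vs. contiguous). */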
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
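/* The __reduce_cython__/__setstate_cython__ pair above implements pickling
* for Enum: reduce returns (__pyx_unpickle_Enum, (type(self), 0xb068931,
* state)), where 0xb068931 (== 184977713, the __pyx_int_184977713 constant)
* is a checksum derived from the type's member layout; __pyx_unpickle_Enum
* verifies it before restoring state, so pickles from an incompatible
* layout fail loudly instead of silently corrupting the object. Round-trip
* sketch at the Python level, assuming the module is importable:
*
*   import pickle
*   e2 = pickle.loads(pickle.dumps(e))   # dispatches to __pyx_unpickle_Enum
*   assert repr(e2) == repr(e)
*/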
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
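/* align_pointer rounds an address up to the next multiple of `alignment`:
* offset = p % alignment; if nonzero, advance by (alignment - offset). The
* caller is expected to have over-allocated by at least alignment - 1
* bytes. Worked example: memory == 0x1003 with alignment == 8 gives
* offset == 3, so the result is 0x1003 + (8 - 3) == 0x1008. A standalone C
* sketch of the same computation (align_up is an illustrative name):
*
*   #include <stdint.h>
*   static void *align_up(void *p, size_t alignment) {
*       uintptr_t a = (uintptr_t)p;
*       size_t offset = a % alignment;    /+ distance past the last boundary +/
*       if (offset > 0)
*           a += alignment - offset;      /+ bump to the next boundary +/
*       return (void *)a;
*   }
*/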
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
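/* Generated argument unpacking for (obj, flags, dtype_is_object=False):
* obj and flags are required, positionally or by keyword;
* dtype_is_object is optional and defaults to 0 when absent. */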
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
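/* memoryview.__cinit__ above does three things: (1) acquires the
* underlying buffer via __Pyx_GetBuffer when given a real exporter,
* pinning Py_None into view.obj when the exporter left it NULL; (2) hands
* the instance a PyThread lock, preferring one of the
* THREAD_LOCKS_PREALLOCATED (8, hence the literal in the comparison above)
* locks created at module init and calling PyThread_allocate_lock() only
* once the pool is exhausted, raising MemoryError if even that fails;
* (3) derives dtype_is_object from the buffer format ("O\0") when
* PyBUF_FORMAT was requested, and aligns the acquisition counter used for
* thread-safe slice refcounting. The __dealloc__ that follows returns a
* pooled lock by swapping its slot with the last used slot before
* decrementing the pool count. */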
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
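/* get_item_pointer walks the index tuple/list one dimension at a time,
 * letting __pyx_pybuffer_index apply the stride (and any suboffset) for each
 * coordinate; it returns the final element pointer, or NULL with an
 * exception set ("except NULL") on a bad index. */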
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; /* dead `if (unlikely(0 < 0))` bounds stub dropped */
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; /* dead `if (unlikely(0 < 0))` bounds stub dropped */
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
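/* __getitem__ has three outcomes: `memview[...]` returns the view itself,
 * an index containing slices produces a new sliced memoryview through
 * memview_slice, and a fully scalar index resolves to a single element that
 * convert_item_to_object turns into a Python value. _unellipsify first
 * expands any Ellipsis so every dimension has an explicit index. */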
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":407
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":413
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
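/* __setitem__ rejects writes on read-only exporters up front, normalizes
 * the index with _unellipsify, and then dispatches: slice indices either
 * copy memoryview-to-memoryview (setitem_slice_assignment) or broadcast a
 * scalar over the slice (setitem_slice_assign_scalar); a plain element
 * index goes through setitem_indexed. */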
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 418, __pyx_L1_error)
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":420
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":427
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":429
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
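/* is_slice coerces an arbitrary object to a memoryview so it can serve as
 * the source of a slice assignment; writability is masked out of the flags
 * (self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS) since the source is
 * only read. Objects that do not support the buffer protocol raise
 * TypeError, which is swallowed here and reported as None. */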
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":439
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
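/* setitem_slice_assignment lowers both memoryviews to C-level
 * __Pyx_memviewslice structs and hands them to memoryview_copy_contents,
 * which performs the element-wise copy (shape compatibility, broadcasting,
 * and overlap handling live in that helper). */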
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":446
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error)
/* "View.MemoryView":447
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
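/* setitem_slice_assign_scalar converts the scalar once into a temporary
 * item buffer -- the 128-int stack array when view.itemsize fits, otherwise
 * a PyMem_Malloc'd block -- and then lets slice_assign_scalar stamp that
 * item over every element of dst. The try/finally guarantees the heap
 * buffer is released; PyMem_Free(NULL) is a no-op on the stack path. */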
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[128]; /* matches `cdef int array[128]` above */
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error)
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":462
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":464
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":470
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":476
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
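/* setitem_indexed is the scalar-element store: resolve the element address
 * with get_item_pointer, then let assign_item_from_object write the value
 * in the buffer's native format. */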
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
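/* Fallback element -> object conversion through Python's struct module:
 * the raw itemsize bytes are copied out and struct.unpack'd with
 * self.view.format. Because unpack always returns a tuple, a
 * single-character format string is unwrapped to the bare scalar. */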
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp, __pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
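/* Fallback object -> element conversion, the inverse of
 * convert_item_to_object: struct.pack serializes the value (tuples are
 * splatted so multi-field formats work) and the resulting bytes are copied
 * into itemp one byte at a time. */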
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
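/* The next function implements the PEP 3118 buffer protocol (__getbuffer__)
 * for the memoryview type: each PyBUF_* flag requested by the consumer
 * decides whether the corresponding Py_buffer field (shape, strides,
 * suboffsets, format) is forwarded from self.view or left NULL, and a
 * PyBUF_WRITABLE request on a read-only view raises ValueError.
 *
 * A minimal sketch of the consumer side, compiled out with `#if 0`;
 * `demo_fill_with_zeros` is a hypothetical name, not part of this module. */
#if 0
#include <string.h>   /* memset; Python.h is already included by this file */
static int demo_fill_with_zeros(PyObject *mv)
{
    Py_buffer view;
    /* Ask for a writable, C-contiguous view; __getbuffer__ below rejects
       this with ValueError when the underlying memoryview is read-only. */
    if (PyObject_GetBuffer(mv, &view, PyBUF_WRITABLE | PyBUF_C_CONTIGUOUS) < 0)
        return -1;
    memset(view.buf, 0, (size_t)view.len);
    PyBuffer_Release(&view);
    return 0;
}
#endif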
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
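/* The getters that follow expose the fields of the wrapped Py_buffer as
 * read-only properties: T (a transposed copy of the view), base (the
 * exporting object), shape/strides/suboffsets (tuples built from the
 * per-dimension C arrays), plus ndim, itemsize, nbytes and size. Each is a
 * thin __get__ wrapper around the corresponding self.view member.
 *
 * Hypothetical sketch (compiled out): from C these are reached through the
 * ordinary attribute protocol. */
#if 0
static PyObject *demo_get_shape(PyObject *mv)
{
    return PyObject_GetAttrString(mv, "shape");   /* new reference or NULL */
}
#endif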
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 570, __pyx_L1_error)
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":572
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":579
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
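/* nbytes is derived rather than stored: it multiplies the element count
 * exposed by the `size` property by view.itemsize with PyNumber_Multiply,
 * so the result is an exact Python int even when the product would overflow
 * Py_ssize_t. */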
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
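/* size computes the total element count as the product of all extents in
 * view.shape and caches it in self._size, so only the first access pays for
 * the loop. The multiplication uses Python ints (PyNumber_InPlaceMultiply),
 * which keeps the count exact for arbitrarily large buffers. */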
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":603
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
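/* __len__ returns the extent of the leading dimension (shape[0]) and falls
 * back to 0 for a zero-dimensional view, so len(mv) is always defined even
 * when there is no first axis. */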
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":609
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
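/* is_c_contig() and is_f_contig() fetch a C-level slice of the view
 * (get_slice_from_memview) and test whether its strides describe a dense
 * row-major ('C') or column-major ('F') layout. As a hedged point of
 * comparison only -- the generated code does not use it -- CPython offers an
 * analogous check on a raw Py_buffer: */
#if 0
static int demo_is_c_contig(const Py_buffer *view)
{
    /* order: 'C' row-major, 'F' column-major, 'A' either. */
    return PyBuffer_IsContiguous(view, 'C');
}
#endif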
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
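/* copy() and copy_fortran() materialise a new contiguous buffer: each clears
 * the opposite contiguity flag from self.flags, snapshots the view with
 * slice_copy, rebuilds it contiguously via slice_copy_contig ('c' vs
 * 'fortran' order), and wraps the result in a fresh memoryview with
 * memoryview_copy_from_slice. */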
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":645
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":647
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":648
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":653
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
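/* Pickling is deliberately unsupported: memoryview has a non-trivial
 * __cinit__ (it must re-acquire a buffer from the exporting object), so no
 * default __reduce__ exists and both pickle hooks below raise TypeError. */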
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
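/* memoryview_cwrapper is the C-level constructor used elsewhere in the
 * module: it packs (o, flags, dtype_is_object) into an argument tuple,
 * calls the memoryview type, then stores the __Pyx_TypeInfo pointer that a
 * plain Python-level call could not pass. A hedged sketch of the same call
 * through the convenience API -- not what the generated code does, and
 * `demo_new_memoryview` is a hypothetical name: */
#if 0
static PyObject *demo_new_memoryview(PyTypeObject *type, PyObject *o,
                                     int flags, int dtype_is_object)
{
    /* "Oii" builds the 3-tuple (o, flags, dtype_is_object) for us. */
    return PyObject_CallFunction((PyObject *)type, "Oii",
                                 o, flags, dtype_is_object);
}
#endif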
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":658
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":659
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":660
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
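/* Editorial note: memoryview_cwrapper is the C-level factory the generated
 * module calls wherever it needs a View.MemoryView.memoryview object: it
 * boxes the int/bint arguments, invokes the memoryview type, then stores the
 * typeinfo pointer on the fresh object. A hypothetical call site follows,
 * kept under #if 0 so it cannot affect compilation; the PyBUF_FULL flag and
 * the __Pyx_TypeInfo_double descriptor are illustrative stand-ins, not taken
 * from this module. */
#if 0
PyObject *mv = __pyx_memoryview_new(obj, PyBUF_FULL, /*dtype_is_object=*/0,
                                    &__Pyx_TypeInfo_double);
if (!mv) { /* exception already set by the memoryview constructor */ }
#endif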
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":664
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":672
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":674
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":676
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":677
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":678
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 679, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":683
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":685
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":686
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":689
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 689, __pyx_L1_error)
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":691
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":692
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":694
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":696
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":698
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
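/* Editorial note: at the Python level _unellipsify returns
 * (have_slices or nslices, normalized_tuple). Worked examples for ndim = 3,
 * derived from the source quoted in the comments above (slice(None) is
 * shorthand for slice(None, None, None)):
 *
 *     _unellipsify(0, 3)          -> (2,    (0, slice(None), slice(None)))
 *     _unellipsify((..., 1), 3)   -> (True, (slice(None), slice(None), 1))
 *     _unellipsify((0, 1, 2), 3)  -> (0,    (0, 1, 2))
 *
 * The first element is truthy exactly when slice dimensions remain, which
 * the memoryview __getitem__ implementation (elsewhere in this file) uses to
 * choose between scalar element access and sub-view creation. */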
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":701
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 703, __pyx_L1_error)
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
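/* Editorial note: a suboffset >= 0 marks an indirect (PIL-style) dimension,
 * so assert_direct_dimensions accepts a slice only when every entry of
 * suboffsets[:ndim] is negative. Minimal sketch, illustrative values only: */
#if 0
Py_ssize_t offs[2] = { -1, -1 };       /* both dimensions direct          */
assert_direct_dimensions(offs, 2);     /* returns None, no error          */
offs[1] = 0;                           /* dimension 1 now indirect        */
assert_direct_dimensions(offs, 2);     /* raises ValueError("Indirect
                                          dimensions not supported")      */
#endif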
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 722, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":728
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error)
/* "View.MemoryView":748
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error)
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":760
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":764
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error)
/* "View.MemoryView":774
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) }
/* "View.MemoryView":779
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) }
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
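/* Editorial note: memview_slice walks the index tuple once and, as the loop
 * above shows, handles three cases per entry: an integer index collapses the
 * dimension (passed as start with is_slice=0, new_ndim not incremented),
 * None injects a new length-1 dimension with stride 0 and suboffset -1, and
 * a slice object is unpacked into start/stop/step plus the corresponding
 * have_start/have_stop/have_step flags before slice_memviewslice normalizes
 * it. At the Python level (illustrative):
 *
 *     mv[2, None, 1:9:2]   # dim 0 indexed away, a unit axis inserted,
 *                          # dim 1 sliced -> result has ndim == 2
 */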
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error)
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":835
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error)
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":855
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":868
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":875
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":884
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":892
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":899
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":900
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":902
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":904
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
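/* Editorial note: for the slice path, slice_memviewslice reproduces Python
 * slice semantics in C: negative start/stop are wrapped by +shape and then
 * clamped; a missing start defaults to 0 (shape-1 for a negative step), a
 * missing stop to shape (-1 for a negative step), a missing step to 1; the
 * new extent is the ceiling of (stop - start)/step, floored at 0. Worked
 * arithmetic for shape = 10, slice 1:8:3, matching the cdivision code above:
 *
 *     new_shape = (8 - 1) / 3 = 2          (C truncating division)
 *     remainder (8 - 1) - 3*2 = 1 != 0  -> new_shape = 3
 *
 * which agrees with len(range(1, 8, 3)) == 3. The dimension's stride becomes
 * stride * step, and dst.data advances by start * stride (or, when an
 * earlier dimension was indirect, the pending suboffset is adjusted
 * instead). */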
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
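/* A minimal sketch of the addressing rule pybuffer_index implements above,
 * assuming Py_ssize_t from Python.h; `demo_index_ptr` is a hypothetical
 * name used only for illustration.  A direct dimension (suboffset < 0)
 * lands on the item itself; an indirect one dereferences a stored pointer
 * first, as in PIL-style buffers.  Guarded out of compilation on purpose. */
#if 0
static char *demo_index_ptr(char *bufp, Py_ssize_t index,
                            Py_ssize_t shape, Py_ssize_t stride,
                            Py_ssize_t suboffset)
{
    if (index < 0)
        index += shape;                   /* Python-style negative index */
    if (index < 0 || index >= shape)
        return NULL;                      /* caller raises IndexError */
    {
        char *p = bufp + index * stride;  /* direct: p is the item */
        if (suboffset >= 0)
            p = *(char **)p + suboffset;  /* indirect: follow the pointer */
        return p;
    }
}
#endif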
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
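/* transpose_memslice above reverses shape[] and strides[] in place, which
 * transposes the view without moving any data: e.g. a C-ordered 3x4 slice
 * of 8-byte items (shape {3,4}, strides {32,8}) becomes its 4x3 transpose
 * (shape {4,3}, strides {8,32}).  A minimal sketch of the swap loop;
 * `demo_transpose` is a hypothetical name, guarded out of compilation. */
#if 0
static void demo_transpose(Py_ssize_t *shape, Py_ssize_t *strides, int ndim)
{
    int i;
    for (i = 0; i < ndim / 2; i++) {
        int j = ndim - 1 - i;
        Py_ssize_t t;
        t = strides[i]; strides[i] = strides[j]; strides[j] = t;
        t = shape[i];   shape[i]   = shape[j];   shape[j]   = t;
    }
}
#endif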
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
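/* convert_item_to_object above is function-pointer dispatch with a
 * base-class fallback: a dtype-specific converter is used when the slice
 * carries one, otherwise the generic struct-format path inherited from
 * memoryview is taken.  A sketch of that pattern with hypothetical
 * `demo_*` names, guarded out of compilation. */
#if 0
typedef void *(*demo_converter)(char *itemp);

static void *demo_convert(demo_converter fast_path,
                          demo_converter generic_path, char *itemp)
{
    /* fast_path stands in for self.to_object_func, generic_path for the
     * inherited memoryview.convert_item_to_object. */
    return fast_path != NULL ? fast_path(itemp) : generic_path(itemp);
}
#endif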
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
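/* One detail of memoryview_fromslice above worth calling out: per the
 * buffer protocol, view->suboffsets should stay NULL for fully direct
 * buffers, so the array is exposed only when some dimension is indirect.
 * A sketch of that scan, assuming suboffset >= 0 marks an indirect
 * dimension; `demo_suboffsets` is a hypothetical name, guarded out of
 * compilation. */
#if 0
static Py_ssize_t *demo_suboffsets(Py_ssize_t *suboffsets, int ndim)
{
    int i;
    for (i = 0; i < ndim; i++)
        if (suboffsets[i] >= 0)
            return suboffsets;  /* at least one indirect dimension */
    return NULL;                /* all direct: advertise none */
}
#endif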
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1059
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
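/* get_slice_from_memview above is a borrow-or-fill helper: when the
 * memoryview already embeds a __Pyx_memviewslice it returns a pointer to
 * that, and only otherwise fills the caller's stack slot via slice_copy.
 * A loose sketch of the shape of that API with hypothetical demo_* names,
 * guarded out of compilation. */
#if 0
typedef struct { int filled; } demo_slice;

static demo_slice *demo_get_slice(demo_slice *embedded /* NULL if absent */,
                                  demo_slice *scratch)
{
    if (embedded != NULL)
        return embedded;   /* borrow the existing slice, no copy */
    scratch->filled = 1;   /* stand-in for slice_copy(memview, scratch) */
    return scratch;
}
#endif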
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1067
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1068
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1069
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1071
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1072
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1074
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1075
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1076
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1077
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
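/* slice_copy above flattens Py_buffer metadata into the fixed-size arrays
 * of a __Pyx_memviewslice; when the exporter supplies no suboffsets, every
 * dimension defaults to -1 ("direct").  A minimal sketch of the
 * per-dimension copy; `demo_slice_copy` is a hypothetical name, guarded
 * out of compilation. */
#if 0
static void demo_slice_copy(int ndim,
                            const Py_ssize_t *shape,
                            const Py_ssize_t *strides,
                            const Py_ssize_t *suboffsets, /* may be NULL */
                            Py_ssize_t *dst_shape,
                            Py_ssize_t *dst_strides,
                            Py_ssize_t *dst_suboffsets)
{
    int dim;
    for (dim = 0; dim < ndim; dim++) {
        dst_shape[dim]      = shape[dim];
        dst_strides[dim]    = strides[dim];
        dst_suboffsets[dim] = suboffsets ? suboffsets[dim] : -1;
    }
}
#endif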
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1098
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1103
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
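/* memoryview_copy_from_slice above propagates the dtype converters only
 * when the source is itself a _memoryviewslice; a plain memoryview has
 * none to inherit, so both pointers fall back to NULL.  A sketch of that
 * rule with a hypothetical demo_conv type, guarded out of compilation. */
#if 0
typedef struct {
    void *to_object_func;
    void *to_dtype_func;
} demo_conv;

static demo_conv demo_inherit(const demo_conv *src /* NULL for plain views */)
{
    demo_conv out = { NULL, NULL };
    if (src != NULL)
        out = *src;  /* reuse the specialized converters */
    return out;
}
#endif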
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1111
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1113
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1121
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1122
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1124
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1129
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1132
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1135
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1137
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
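/* get_best_order above compares the magnitudes of the last and first
 * non-degenerate strides: C-like layouts keep the smallest stride on the
 * last axis, Fortran-like on the first.  For shape {3,4} with itemsize 8,
 * strides {32,8} yields 'C' and strides {8,24} yields 'F'.  A sketch with
 * the hypothetical name `demo_best_order`, guarded out of compilation. */
#if 0
static char demo_best_order(const Py_ssize_t *shape,
                            const Py_ssize_t *strides, int ndim)
{
    Py_ssize_t c_stride = 0, f_stride = 0;
    int i;
    for (i = ndim - 1; i >= 0; i--)
        if (shape[i] > 1) { c_stride = strides[i]; break; }
    for (i = 0; i < ndim; i++)
        if (shape[i] > 1) { f_stride = strides[i]; break; }
    if (c_stride < 0) c_stride = -c_stride;
    if (f_stride < 0) f_stride = -f_stride;
    return (c_stride <= f_stride) ? 'C' : 'F';
}
#endif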
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1147
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1149
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1150
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1154
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1155
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1157
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1159
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1160
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1162
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1163
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1167
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1168
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
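/* _copy_strided_to_strided above peels one dimension per recursive call
 * and copies item by item, collapsing the contiguous 1-D case into a
 * single memcpy.  A sketch assuming source and destination extents match
 * (which the caller guarantees after broadcasting), so one shape array
 * suffices; `demo_copy` is a hypothetical name, guarded out of
 * compilation. */
#if 0
#include <string.h>

static void demo_copy(const char *src, const Py_ssize_t *src_strides,
                      char *dst, const Py_ssize_t *dst_strides,
                      const Py_ssize_t *shape, int ndim, size_t itemsize)
{
    Py_ssize_t extent = shape[0];
    Py_ssize_t ss = src_strides[0], ds = dst_strides[0];
    Py_ssize_t i;

    if (ndim == 1 && ss > 0 && ds > 0 &&
        (size_t)ss == itemsize && (size_t)ds == itemsize) {
        memcpy(dst, src, itemsize * (size_t)extent);  /* contiguous run */
        return;
    }
    for (i = 0; i < extent; i++) {
        if (ndim == 1)
            memcpy(dst, src, itemsize);               /* one item */
        else
            demo_copy(src, src_strides + 1, dst, dst_strides + 1,
                      shape + 1, ndim - 1, itemsize); /* peel a dimension */
        src += ss;
        dst += ds;
    }
}
#endif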
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1173
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
/* "View.MemoryView":1179
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for shape in src.shape[:ndim]:
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1181
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*
* for shape in src.shape[:ndim]: # <<<<<<<<<<<<<<
* size *= shape
*
*/
__pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_shape = (__pyx_t_2[0]);
/* "View.MemoryView":1182
*
* for shape in src.shape[:ndim]:
* size *= shape # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * __pyx_v_shape);
}
/* "View.MemoryView":1184
* size *= shape
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
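/* slice_get_size above computes bytes = itemsize * product(shape): a 3x4
 * slice of 8-byte items occupies 8 * 3 * 4 = 96 bytes.  A minimal sketch;
 * `demo_slice_size` is a hypothetical name, guarded out of compilation. */
#if 0
static Py_ssize_t demo_slice_size(Py_ssize_t itemsize,
                                  const Py_ssize_t *shape, int ndim)
{
    Py_ssize_t size = itemsize;
    int i;
    for (i = 0; i < ndim; i++)
        size *= shape[i];
    return size;
}
#endif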
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1197
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1198
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1201
* stride *= shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1202
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1203
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1205
* stride *= shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
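/* Editorial note: copy_data_to_temp malloc()s a contiguous buffer of
 * slice_get_size(src, ndim) bytes, mirrors src's shape into *tmpslice with
 * suboffsets disabled (-1) and freshly computed contiguous strides, then
 * fills it with a single memcpy when src is already contiguous in `order`,
 * falling back to copy_strided_to_strided otherwise.  The caller owns the
 * returned buffer and must free() it. */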
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
/* "View.MemoryView":1219
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1220
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1222
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1224
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1227
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1228
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1229
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1230
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1231
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1233
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1237
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1244
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1246
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
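/* Editorial note: _err_extents acquires the GIL, formats the offending
 * dimension index and the two mismatched extents into a ValueError, and
 * raises it; the -1 return value propagates the error state back through
 * nogil callers. */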
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1254
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1253
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1253, __pyx_L1_error)
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
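/* Editorial note: _err_dim raises `error` with msg (an ASCII C string)
 * %-formatted with the dimension index, again under a freshly acquired
 * GIL so it is safe to call from nogil code. */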
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1258, __pyx_L1_error)
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
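/* Editorial note: _err raises `error` called with the decoded message when
 * one is supplied, and re-raises the bare exception object when msg is
 * NULL. */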
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1263
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1263, __pyx_L1_error)
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1265
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1265, __pyx_L1_error)
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
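/* Editorial note: memoryview_copy_contents is the generic slice-copy entry
 * point.  The steps below, in order: broadcast leading dimensions so both
 * slices have max(src_ndim, dst_ndim) dimensions; verify extents (a source
 * extent of 1 broadcasts by zeroing its stride, any other mismatch raises
 * via _err_extents) and reject indirect dimensions; if the slices overlap
 * in memory, copy src into a temporary contiguous buffer first; use a
 * single memcpy when src and dst are both C- or both F-contiguous;
 * otherwise transpose both slices when Fortran order is preferable for
 * both and fall back to an element-wise strided copy, with
 * refcount_copying bracketing the writes for object dtypes. */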
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
/* "View.MemoryView":1276
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1277
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1279
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1280
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1281
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1285
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1287
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1289
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1291
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1294
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1295
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1297
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error)
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1305
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1307
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1308
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1314
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1320
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1321
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1322
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1323
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1324
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1329
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error)
/* "View.MemoryView":1330
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error)
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1332
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1333
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1334
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1336
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1337
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
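/* Editorial note: broadcast_leading shifts the existing ndim entries of
 * shape/strides/suboffsets toward the end of the arrays and prepends
 * ndim_other - ndim broadcast dimensions with extent 1, stride copied from
 * the original strides[0], and suboffset -1.  E.g. a slice of shape
 * {3, 4} broadcast against a 4-d destination becomes {1, 1, 3, 4}. */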
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1344
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1346
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1347
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1348
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1349
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1351
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1352
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1353
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1354
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
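/* Editorial note: refcount_copying is a no-op unless the element type is a
 * Python object; for object dtypes it INCREFs (inc=True) or DECREFs
 * (inc=False) every element of the destination slice, acquiring the GIL
 * through the _with_gil helper below. */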
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1367
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
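/* Editorial note: refcount_objects_in_slice walks the slice recursively,
 * one dimension per call; at ndim == 1 each element is reinterpreted as a
 * PyObject* and INCREF'd or DECREF'd, and `data` advances by strides[0]
 * between iterations at every recursion level. */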
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1381
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1384
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1386
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1388
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1389
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1391
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
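/* Editorial note: slice_assign_scalar broadcast-assigns the itemsize-byte
 * value at `item` to every element of dst.  For object dtypes the old
 * references are dropped before the overwrite (inc=False) and references
 * to the newly copied value are taken afterwards (inc=True). */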
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1400
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1401
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1403
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
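/* Editorial note: _slice_assign_scalar is the nogil recursive worker: at
 * ndim == 1 it memcpy()s `item` into each element, otherwise it recurses
 * into the next dimension, stepping `data` by strides[0] either way. */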
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1411
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1412
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1415
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1417
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1419
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1420
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1422
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
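/* Editorial note: the functions below implement pickle support for the
 * module-internal Enum type.  The wrapper unpacks the (__pyx_type,
 * __pyx_checksum, __pyx_state) argument triple; the hard-coded checksum
 * 0xb068931 appears to guard against unpickling state produced by an
 * incompatible version of the generated code. */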
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
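/* Editorial note: __set_state restores Enum.name from __pyx_state[0] and,
 * when the state tuple carries a second entry and the instance has a
 * __dict__, merges __pyx_state[1] into that dict. */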
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
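/* ---- C slot implementations for the internal `array` type. tp_new
   allocates the instance, wires up the vtable, None-initialises the
   `mode` and `_format` fields, then runs __cinit__; tp_dealloc saves and
   restores any pending exception around the __dealloc__ call, temporarily
   raising the refcount so the object stays alive inside it. */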
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_array___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if (!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"estimate_gamma_m.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
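/* ---- Enum: lightweight named sentinels used below as memory-layout
   markers (generic, strided, indirect, contiguous, indirect_contiguous).
   The type is GC-tracked, so tp_traverse/tp_clear handle its single
   `name` reference. */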
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"estimate_gamma_m.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
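/* ---- memoryview: wrapper around a Py_buffer. tp_new installs the
   vtable and None-initialises obj/_size/_array_interface before
   __cinit__ acquires the buffer; tp_traverse/tp_clear also visit
   view.obj, the exporter reference held inside the Py_buffer. */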
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryview___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if (!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"estimate_gamma_m.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
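/* ---- _memoryviewslice: subclass of memoryview returned for typed
   slices. Construction and destruction chain through the base type; the
   subclass adds `from_object` and an owned __Pyx_memviewslice
   (`from_slice`), released via __PYX_XDEC_MEMVIEW in tp_clear. */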
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryviewslice___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"estimate_gamma_m._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
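/* The module-level method table stays empty: Cython registers def
   functions such as estimate_gamma_m as function objects during module
   exec rather than through PyMethodDef entries. */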
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
static int __pyx_import_star_set(PyObject *o, PyObject* py_name, char *name) {
static const char* internal_type_names[] = {
"Enum",
"FILE",
"PyObject",
"PyThread_type_lock",
"Py_intptr_t",
"__Pyx_TypeInfo",
"__Pyx_memviewslice",
"__pyx_atomic_int",
"__pyx_buffer",
"__pyx_ctuple_Py_ssize_t",
"__pyx_ctuple_Py_ssize_t_struct",
"__pyx_ctuple_char__ptr",
"__pyx_ctuple_char__ptr_struct",
"__pyx_ctuple_int",
"__pyx_ctuple_int__and_Py_ssize_t",
"__pyx_ctuple_int__and_Py_ssize_t__and_Py_ssize_t",
"__pyx_ctuple_int__and_Py_ssize_t__and_Py_ssize_t_struct",
"__pyx_ctuple_int__and_Py_ssize_t_struct",
"__pyx_ctuple_int__and_int",
"__pyx_ctuple_int__and_int_struct",
"__pyx_ctuple_int_struct",
"__pyx_ctuple_long",
"__pyx_ctuple_long__and_long",
"__pyx_ctuple_long__and_long__and_long",
"__pyx_ctuple_long__and_long__and_long_struct",
"__pyx_ctuple_long__and_long_struct",
"__pyx_ctuple_long_struct",
"__pyx_memoryview",
"_memoryviewslice",
"array",
"memoryview",
0
};
const char** type_name = internal_type_names;
while (*type_name) {
if (__Pyx_StrEq(name, *type_name)) {
PyErr_Format(PyExc_TypeError, "Cannot overwrite C type %s", name);
goto bad;
}
type_name++;
}
if (0);
else if (__Pyx_StrEq(name, "Py_None")) {
PyErr_Format(PyExc_TypeError, "Cannot convert Python object Py_None to PyObject *");
__PYX_ERR(1, 57, __pyx_L2_error)
}
else if (__Pyx_StrEq(name, "__pyx_memoryview_thread_locks")) {
PyErr_Format(PyExc_TypeError, "Cannot convert Python object __pyx_memoryview_thread_locks to PyThread_type_lock [8]");
__PYX_ERR(1, 317, __pyx_L2_error)
}
else if (__Pyx_StrEq(name, "__pyx_memoryview_thread_locks_used")) {
__pyx_memoryview_thread_locks_used = __Pyx_PyInt_As_int(o); if (unlikely((__pyx_memoryview_thread_locks_used == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 316, __pyx_L2_error)
}
else if (__Pyx_StrEq(name, "contiguous")) {
Py_INCREF(o);
Py_DECREF(contiguous);
contiguous = o;
}
else if (__Pyx_StrEq(name, "generic")) {
Py_INCREF(o);
Py_DECREF(generic);
generic = o;
}
else if (__Pyx_StrEq(name, "indirect")) {
Py_INCREF(o);
Py_DECREF(indirect);
indirect = o;
}
else if (__Pyx_StrEq(name, "indirect_contiguous")) {
Py_INCREF(o);
Py_DECREF(indirect_contiguous);
indirect_contiguous = o;
}
else if (__Pyx_StrEq(name, "strided")) {
Py_INCREF(o);
Py_DECREF(strided);
strided = o;
}
else {
if (PyObject_SetAttr(__pyx_m, py_name, o) < 0) goto bad;
}
return 0;
__pyx_L2_error:;
__Pyx_AddTraceback("estimate_gamma_m", __pyx_clineno, __pyx_lineno, __pyx_filename);
bad:
return -1;
}
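/* C port of CPython's `from ... import *` lookup: iterate __all__ when
   the module defines it, otherwise fall back to the keys of __dict__,
   skipping names with a leading underscore. */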
static int
__Pyx_import_all_from(PyObject *locals, PyObject *v)
{
PyObject *all = PyObject_GetAttrString(v, "__all__");
PyObject *dict, *name, *value;
int skip_leading_underscores = 0;
int pos, err;
if (all == NULL) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1;
PyErr_Clear();
dict = PyObject_GetAttrString(v, "__dict__");
if (dict == NULL) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1;
PyErr_SetString(PyExc_ImportError,
"from-import-* object has no __dict__ and no __all__");
return -1;
}
#if PY_MAJOR_VERSION < 3
all = PyObject_CallMethod(dict, (char *)"keys", NULL);
#else
all = PyMapping_Keys(dict);
#endif
Py_DECREF(dict);
if (all == NULL)
return -1;
skip_leading_underscores = 1;
}
for (pos = 0, err = 0; ; pos++) {
name = PySequence_GetItem(all, pos);
if (name == NULL) {
if (!PyErr_ExceptionMatches(PyExc_IndexError))
err = -1;
else
PyErr_Clear();
break;
}
if (skip_leading_underscores &&
#if PY_MAJOR_VERSION < 3
PyString_Check(name) &&
PyString_AS_STRING(name)[0] == '_')
#else
PyUnicode_Check(name) &&
PyUnicode_AS_UNICODE(name)[0] == '_')
#endif
{
Py_DECREF(name);
continue;
}
value = PyObject_GetAttr(v, name);
if (value == NULL)
err = -1;
else if (PyDict_CheckExact(locals))
err = PyDict_SetItem(locals, name, value);
else
err = PyObject_SetItem(locals, name, value);
Py_DECREF(name);
Py_XDECREF(value);
if (err != 0)
break;
}
Py_DECREF(all);
return err;
}
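/* Driver for import-star at the C level: gathers the exported names into
   a dict, then routes every binding through __pyx_import_star_set so
   assignments to C-typed module globals are converted where possible and
   rejected with a TypeError otherwise. */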
static int __pyx_import_star(PyObject* m) {
int i;
int ret = -1;
char* s;
PyObject *locals = 0;
PyObject *list = 0;
#if PY_MAJOR_VERSION >= 3
PyObject *utf8_name = 0;
#endif
PyObject *name;
PyObject *item;
locals = PyDict_New(); if (!locals) goto bad;
if (__Pyx_import_all_from(locals, m) < 0) goto bad;
list = PyDict_Items(locals); if (!list) goto bad;
for(i=0; i<PyList_GET_SIZE(list); i++) {
name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0);
item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1);
#if PY_MAJOR_VERSION >= 3
utf8_name = PyUnicode_AsUTF8String(name);
if (!utf8_name) goto bad;
s = PyBytes_AS_STRING(utf8_name);
if (__pyx_import_star_set(item, name, s) < 0) goto bad;
Py_DECREF(utf8_name); utf8_name = 0;
#else
s = PyString_AsString(name);
if (!s) goto bad;
if (__pyx_import_star_set(item, name, s) < 0) goto bad;
#endif
}
ret = 0;
bad:
Py_XDECREF(locals);
Py_XDECREF(list);
#if PY_MAJOR_VERSION >= 3
Py_XDECREF(utf8_name);
#endif
return ret;
}
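/* Module definition. With PEP 489 multi-phase init the create and exec
   steps are exposed as m_slots; otherwise a classic single-phase
   PyModuleDef with m_size == -1 is used. */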
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_estimate_gamma_m(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_estimate_gamma_m},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"estimate_gamma_m",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
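/* Interned-string table covering every literal and identifier the module
   touches, materialised once by __Pyx_InitStrings; the trailing flags
   record the encoding and whether each entry is unicode, str, and
   interned as an identifier. */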
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_M, __pyx_k_M, sizeof(__pyx_k_M), 0, 0, 1, 1},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_RFm, __pyx_k_RFm, sizeof(__pyx_k_RFm), 0, 0, 1, 1},
{&__pyx_n_s_RHm, __pyx_k_RHm, sizeof(__pyx_k_RHm), 0, 0, 1, 1},
{&__pyx_n_s_Rm, __pyx_k_Rm, sizeof(__pyx_k_Rm), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s_XFmrDiff, __pyx_k_XFmrDiff, sizeof(__pyx_k_XFmrDiff), 0, 0, 1, 1},
{&__pyx_n_s_XFmrDiff_original, __pyx_k_XFmrDiff_original, sizeof(__pyx_k_XFmrDiff_original), 0, 0, 1, 1},
{&__pyx_n_s_XHmrDiff, __pyx_k_XHmrDiff, sizeof(__pyx_k_XHmrDiff), 0, 0, 1, 1},
{&__pyx_n_s_XHmrDiff_original, __pyx_k_XHmrDiff_original, sizeof(__pyx_k_XHmrDiff_original), 0, 0, 1, 1},
{&__pyx_n_s__20, __pyx_k__20, sizeof(__pyx_k__20), 0, 0, 1, 1},
{&__pyx_n_s_acceptGammaFm, __pyx_k_acceptGammaFm, sizeof(__pyx_k_acceptGammaFm), 0, 0, 1, 1},
{&__pyx_n_s_acceptGammaHm, __pyx_k_acceptGammaHm, sizeof(__pyx_k_acceptGammaHm), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_bGammaFm, __pyx_k_bGammaFm, sizeof(__pyx_k_bGammaFm), 0, 0, 1, 1},
{&__pyx_n_s_bGammaHm, __pyx_k_bGammaHm, sizeof(__pyx_k_bGammaHm), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_cov, __pyx_k_cov, sizeof(__pyx_k_cov), 0, 0, 1, 1},
{&__pyx_n_s_covMatFm, __pyx_k_covMatFm, sizeof(__pyx_k_covMatFm), 0, 0, 1, 1},
{&__pyx_n_s_covMatHm, __pyx_k_covMatHm, sizeof(__pyx_k_covMatHm), 0, 0, 1, 1},
{&__pyx_n_s_covMat_m_New, __pyx_k_covMat_m_New, sizeof(__pyx_k_covMat_m_New), 0, 0, 1, 1},
{&__pyx_n_s_covMat_m_New_save, __pyx_k_covMat_m_New_save, sizeof(__pyx_k_covMat_m_New_save), 0, 0, 1, 1},
{&__pyx_n_s_dice, __pyx_k_dice, sizeof(__pyx_k_dice), 0, 0, 1, 1},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dist, __pyx_k_dist, sizeof(__pyx_k_dist), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_estimate_gamma_m, __pyx_k_estimate_gamma_m, sizeof(__pyx_k_estimate_gamma_m), 0, 0, 1, 1},
{&__pyx_kp_s_estimate_gamma_m_pyx, __pyx_k_estimate_gamma_m_pyx, sizeof(__pyx_k_estimate_gamma_m_pyx), 0, 0, 1, 0},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_flatten, __pyx_k_flatten, sizeof(__pyx_k_flatten), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_gammaFm, __pyx_k_gammaFm, sizeof(__pyx_k_gammaFm), 0, 0, 1, 1},
{&__pyx_n_s_gammaHm, __pyx_k_gammaHm, sizeof(__pyx_k_gammaHm), 0, 0, 1, 1},
{&__pyx_n_s_gamma_m_New, __pyx_k_gamma_m_New, sizeof(__pyx_k_gamma_m_New), 0, 0, 1, 1},
{&__pyx_n_s_gamma_m_New_log, __pyx_k_gamma_m_New_log, sizeof(__pyx_k_gamma_m_New_log), 0, 0, 1, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_info, __pyx_k_info, sizeof(__pyx_k_info), 0, 0, 1, 1},
{&__pyx_n_s_invCovMatFm, __pyx_k_invCovMatFm, sizeof(__pyx_k_invCovMatFm), 0, 0, 1, 1},
{&__pyx_n_s_invCovMatHm, __pyx_k_invCovMatHm, sizeof(__pyx_k_invCovMatHm), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
{&__pyx_n_s_linalg, __pyx_k_linalg, sizeof(__pyx_k_linalg), 0, 0, 1, 1},
{&__pyx_n_s_logGammaFmProbPart1, __pyx_k_logGammaFmProbPart1, sizeof(__pyx_k_logGammaFmProbPart1), 0, 0, 1, 1},
{&__pyx_n_s_logGammaHmProbPart1, __pyx_k_logGammaHmProbPart1, sizeof(__pyx_k_logGammaHmProbPart1), 0, 0, 1, 1},
{&__pyx_n_s_logProb, __pyx_k_logProb, sizeof(__pyx_k_logProb), 0, 0, 1, 1},
{&__pyx_n_s_logProbOld, __pyx_k_logProbOld, sizeof(__pyx_k_logProbOld), 0, 0, 1, 1},
{&__pyx_n_s_logProbPart1, __pyx_k_logProbPart1, sizeof(__pyx_k_logProbPart1), 0, 0, 1, 1},
{&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_max, __pyx_k_max, sizeof(__pyx_k_max), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_normalvariate, __pyx_k_normalvariate, sizeof(__pyx_k_normalvariate), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_nsq, __pyx_k_nsq, sizeof(__pyx_k_nsq), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_offset, __pyx_k_offset, sizeof(__pyx_k_offset), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_phiFm, __pyx_k_phiFm, sizeof(__pyx_k_phiFm), 0, 0, 1, 1},
{&__pyx_n_s_phiHm, __pyx_k_phiHm, sizeof(__pyx_k_phiHm), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_rateFm, __pyx_k_rateFm, sizeof(__pyx_k_rateFm), 0, 0, 1, 1},
{&__pyx_n_s_rateHm, __pyx_k_rateHm, sizeof(__pyx_k_rateHm), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_scipy, __pyx_k_scipy, sizeof(__pyx_k_scipy), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_spl, __pyx_k_spl, sizeof(__pyx_k_spl), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_state, __pyx_k_state, sizeof(__pyx_k_state), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_tools, __pyx_k_tools, sizeof(__pyx_k_tools), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_uniform, __pyx_k_uniform, sizeof(__pyx_k_uniform), 0, 0, 1, 1},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
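/* Resolve the builtins the module uses (max, range, ValueError, ...)
   once up front; the (file, line) pairs in __PYX_ERR point back into
   estimate_gamma_m.pyx (index 0) and the View.MemoryView utility code
   (index 1). */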
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_max = __Pyx_GetBuiltinName(__pyx_n_s_max); if (!__pyx_builtin_max) __PYX_ERR(0, 49, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 60, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
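/* Pre-build the constant tuples used at run time: exception arguments,
   the (0, 1) argument pair for random.uniform, the variable-name tuple
   and code object for estimate_gamma_m, and the Enum constructor
   arguments for the layout sentinels. */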
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "estimate_gamma_m.pyx":63
* gamma_m_New_log=random.normalvariate(c_log(state.gammaHm[m]),bGammaHm)
* gamma_m_New[m]=c_exp(gamma_m_New_log)
* dice[m] = c_log(random.uniform(0,1)) # <<<<<<<<<<<<<<
*
* with nogil:
*/
__pyx_tuple_ = PyTuple_Pack(2, __pyx_int_0, __pyx_int_1); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "estimate_gamma_m.pyx":19
*
*
* def estimate_gamma_m(bGammaHm=0.05,bGammaFm=0.05): # <<<<<<<<<<<<<<
*
* cdef double[:,:] dist = state.dist
*/
__pyx_tuple__21 = PyTuple_Pack(39, __pyx_n_s_bGammaHm, __pyx_n_s_bGammaFm, __pyx_n_s_dist, __pyx_n_s_Rm, __pyx_n_s_XHmrDiff, __pyx_n_s_XHmrDiff_original, __pyx_n_s_XFmrDiff, __pyx_n_s_XFmrDiff_original, __pyx_n_s_logGammaHmProbPart1, __pyx_n_s_covMatHm, __pyx_n_s_invCovMatHm, __pyx_n_s_acceptGammaHm, __pyx_n_s_rateHm, __pyx_n_s_phiHm, __pyx_n_s_gammaHm, __pyx_n_s_logGammaFmProbPart1, __pyx_n_s_covMatFm, __pyx_n_s_invCovMatFm, __pyx_n_s_acceptGammaFm, __pyx_n_s_rateFm, __pyx_n_s_phiFm, __pyx_n_s_gammaFm, __pyx_n_s_M, __pyx_n_s_n, __pyx_n_s_m, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_info, __pyx_n_s_nsq, __pyx_n_s_offset, __pyx_n_s_covMat_m_New, __pyx_n_s_covMat_m_New_save, __pyx_n_s_gamma_m_New, __pyx_n_s_logProbPart1, __pyx_n_s_logProb, __pyx_n_s_logProbOld, __pyx_n_s_dice, __pyx_n_s_cov, __pyx_n_s_gamma_m_New_log); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
__pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(2, 0, 39, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_estimate_gamma_m_pyx, __pyx_n_s_estimate_gamma_m, 19, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 19, __pyx_L1_error)
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__26);
__Pyx_GIVEREF(__pyx_tuple__26);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__28 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__28);
__Pyx_GIVEREF(__pyx_tuple__28);
__pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
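/* Create the shared numeric singletons: 0.05 backs the default proposal
   widths bGammaHm/bGammaFm, 0/1/-1 back the small integer literals, and
   184977713 (0xB068931) is the checksum verified by
   __pyx_unpickle_Enum. */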
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_float_0_05 = PyFloat_FromDouble(0.05); if (unlikely(!__pyx_float_0_05)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
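/* Module start-up is split into small cold phases (global init,
   variable/function export, type init, type/variable/function import)
   that the module exec function calls in sequence. */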
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
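/* Type init wires up the C-level vtables before calling PyType_Ready:
   each extension type gets a struct of function pointers (get_memview,
   get_item_pointer, ...) stored in tp_dict via __Pyx_SetVtable, and
   __pyx_memoryviewslice inherits memoryview's vtable by value, overriding
   only convert_item_to_object/assign_item_from_object. The tp_print = 0
   lines zero a legacy slot that still exists before CPython 3.8.0b1. */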
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
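/* Function import resolves the three cimported LAPACK routines at module
   load time by pulling typed function pointers out of capsules exported
   by scipy.linalg.cython_lapack, checking each signature string. For
   reference: dpotrf computes the Cholesky factorization of a symmetric
   positive-definite matrix, dpotrs solves linear systems from that
   factor, and dpotri computes the inverse from it. */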
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__pyx_t_1 = PyImport_ImportModule("scipy.linalg.cython_lapack"); if (!__pyx_t_1) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportFunction(__pyx_t_1, "dpotrf", (void (**)(void))&__pyx_f_5scipy_6linalg_13cython_lapack_dpotrf, "void (char *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportFunction(__pyx_t_1, "dpotri", (void (**)(void))&__pyx_f_5scipy_6linalg_13cython_lapack_dpotri, "void (char *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_ImportFunction(__pyx_t_1, "dpotrs", (void (**)(void))&__pyx_f_5scipy_6linalg_13cython_lapack_dpotrs, "void (char *, int *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, __pyx_t_5scipy_6linalg_13cython_lapack_d *, int *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error)
Py_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
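/* Module entry points: Python 2 looks for initestimate_gamma_m, Python 3
   for PyInit_estimate_gamma_m. With CYTHON_PEP489_MULTI_PHASE_INIT the
   Py3 entry point only returns the module definition; creation and
   execution are deferred to the __pyx_pymod_create / exec slots below. */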
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initestimate_gamma_m(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initestimate_gamma_m(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_estimate_gamma_m(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_estimate_gamma_m(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
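/* With multi-phase init (PEP 489) the import machinery calls
   __pyx_pymod_create first (which enforces a single interpreter per
   process and copies loader/origin/parent/submodule_search_locations
   from the ModuleSpec into __loader__/__file__/__package__/__path__),
   then __pyx_pymod_exec_estimate_gamma_m below to run the module body. */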
static CYTHON_SMALL_CODE int __pyx_pymod_exec_estimate_gamma_m(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
static PyThread_type_lock __pyx_t_3[8];
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'estimate_gamma_m' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_estimate_gamma_m(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("estimate_gamma_m", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_estimate_gamma_m) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "estimate_gamma_m")) {
if (unlikely(PyDict_SetItemString(modules, "estimate_gamma_m", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error;
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error;
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error;
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
if (unlikely(__Pyx_modinit_function_import_code() != 0)) goto __pyx_L1_error;
/*--- Execution code ---*/
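/* The remainder of this function executes the body of
   estimate_gamma_m.pyx at import time: import scipy.linalg as spl,
   random and numpy as np, star-import tools, import state, bind the
   estimate_gamma_m wrapper into the module dict, and install an empty
   __test__ dict. */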
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "estimate_gamma_m.pyx":2
* #cython: boundscheck=False, wraparound=False, language_level=3
* from scipy import linalg as spl # <<<<<<<<<<<<<<
*
* import random
*/
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_n_s_linalg);
__Pyx_GIVEREF(__pyx_n_s_linalg);
PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_linalg);
__pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_linalg); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_spl, __pyx_t_1) < 0) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "estimate_gamma_m.pyx":4
* from scipy import linalg as spl
*
* import random # <<<<<<<<<<<<<<
* import numpy as np
* #cimport numpy as cnp
*/
__pyx_t_2 = __Pyx_Import(__pyx_n_s_random, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_random, __pyx_t_2) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "estimate_gamma_m.pyx":5
*
* import random
* import numpy as np # <<<<<<<<<<<<<<
* #cimport numpy as cnp
*
*/
__pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) __PYX_ERR(0, 5, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "estimate_gamma_m.pyx":8
* #cimport numpy as cnp
*
* from tools import * # <<<<<<<<<<<<<<
*
* from cython.parallel cimport prange
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s__20);
__Pyx_GIVEREF(__pyx_n_s__20);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s__20);
__pyx_t_1 = __Pyx_Import(__pyx_n_s_tools, __pyx_t_2, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_import_star(__pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "estimate_gamma_m.pyx":16
* from scipy.linalg.cython_lapack cimport dpotrf, dpotrs, dpotri
*
* import state # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_state, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_state, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "estimate_gamma_m.pyx":19
*
*
* def estimate_gamma_m(bGammaHm=0.05,bGammaFm=0.05): # <<<<<<<<<<<<<<
*
* cdef double[:,:] dist = state.dist
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_16estimate_gamma_m_1estimate_gamma_m, NULL, __pyx_n_s_estimate_gamma_m); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_estimate_gamma_m, __pyx_t_1) < 0) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "estimate_gamma_m.pyx":1
* #cython: boundscheck=False, wraparound=False, language_level=3 # <<<<<<<<<<<<<<
* from scipy import linalg as spl
*
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
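/* Sketch of the lock pool filled below (matching DEF
   THREAD_LOCKS_PREALLOCATED = 8 in the .pyx fragment above): eight
   PyThread locks are allocated eagerly into a temporary array and
   memcpy'd into __pyx_memoryview_thread_locks, so memoryview objects
   created later can grab a preallocated lock for their acquisition
   counters instead of allocating one per object. */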
__pyx_t_3[0] = PyThread_allocate_lock();
__pyx_t_3[1] = PyThread_allocate_lock();
__pyx_t_3[2] = PyThread_allocate_lock();
__pyx_t_3[3] = PyThread_allocate_lock();
__pyx_t_3[4] = PyThread_allocate_lock();
__pyx_t_3[5] = PyThread_allocate_lock();
__pyx_t_3[6] = PyThread_allocate_lock();
__pyx_t_3[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_3, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init estimate_gamma_m", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init estimate_gamma_m");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
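/* Keyword matching strategy (our summary): for each passed kwarg, first
   try pointer identity against the interned expected names; on a miss,
   fall back to a length check plus string/unicode comparison; unknown
   names either go into kwds2 (**kwargs) when present or raise TypeError,
   and a name already bound positionally raises the double-keyword error. */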
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* PyDictVersioning */
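/* Dict versioning: CPython (>= 3.6, PEP 509) bumps a hidden
   ma_version_tag on every mutating dict operation. The helpers below read
   that tag so module-global lookups can be cached and revalidated with
   two integer compares instead of a full hash lookup. */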
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, NULL, 0);
}
#endif
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
if (likely(PyCFunction_Check(func)))
#endif
{
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* PyCFunctionFastCall */
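/* METH_FASTCALL path: builtins flagged METH_FASTCALL take a C array of
   arguments plus a count instead of an argument tuple, so this helper can
   forward borrowed pointers without building a tuple. Before CPython 3.7
   the fastcall convention still passed a keywords parameter, hence the
   PY_VERSION_HEX < 0x030700A0 branch below. */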
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* MemviewSliceInit */
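/* Memoryview slice initialization (outline): strides are copied from the
   Py_buffer when present, otherwise derived assuming C-contiguity
   (itemsize innermost, multiplying outward); missing suboffsets become
   -1, meaning direct access with no pointer indirection. The acquisition
   count tracks how many slices reference a memoryview so the owning
   object is INCREF'd once on first acquisition and released on the last,
   taking the GIL when the caller does not hold it. */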
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (memviewslice->memview || memviewslice->data) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview || (PyObject *) memview == Py_None)
return;
if (__pyx_get_slice_count(memview) < 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (first_time) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview) {
return;
} else if ((PyObject *) memview == Py_None) {
memslice->memview = NULL;
return;
}
if (__pyx_get_slice_count(memview) <= 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (last_time) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* BytesEquals */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* None */
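/* Floor division helper: C integer '/' truncates toward zero while
   Python '//' floors toward negative infinity. The correction below
   subtracts 1 exactly when the remainder is nonzero and its sign differs
   from the divisor's, e.g. -7/2 gives -3 in C but -7//2 is -4 in Python. */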
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
length = stop - start;
if (unlikely(length <= 0))
return PyUnicode_FromUnicode(NULL, 0);
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* GetTopmostException */
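/* CPython 3.7+: walks the chained exc_info stack past entries whose
   exception has already been cleared (NULL or Py_None), returning the
   innermost frame that still holds a live "currently handled" exception. */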
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
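/* Save/restore of the currently handled exception (sys.exc_info()):
   __Pyx__ExceptionSave hands out new references to the active triple, and
   __Pyx__ExceptionReset installs a saved triple, releasing whatever it
   displaces. */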
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
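/* Moves the pending exception into (*type, *value, *tb), normalizing it
   and installing it as the currently handled exception; this is the
   C-level equivalent of entering an except: block. Returns 0 on success,
   -1 if normalization itself raised. */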
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
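/* C-level __import__(name, globals, {}, from_list, level). A level of -1
   (generated for Python-2-style implicitly relative imports) is resolved
   on Python 3 by first attempting a package-relative import and, if that
   raises ImportError, retrying as an absolute import. */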
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
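/* Faster PyErr_GivenExceptionMatches for CPython: on Python 3, exception
   subclass tests are answered directly from the type's MRO tuple (or the
   tp_base chain while the MRO is not yet initialized) instead of calling
   PyObject_IsSubclass; on Python 2 the call is kept but the error state is
   saved and restored around it. */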
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
/* non-exception-class tuple entries cannot match; skip them */
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
/* exc_type is neither a class nor a tuple; defer to the generic check below */
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
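/* Generated fast path for "x + <constant>": Python 2 ints are added with a
   C-level overflow check, exact PyLongs of up to four digits are unpacked
   into a long (or long long) first, floats take the float path, and
   anything else falls back to PyNumber_Add / PyNumber_InPlaceAdd. The test
   (x^a) >= 0 || (x^b) >= 0 detects signed overflow: wrapped addition can
   only overflow when the result's sign differs from both operands' signs. */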
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* None (unnamed utility): raise UnboundLocalError for a named local */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* None (unnamed utility): floor division for long, same scheme as __Pyx_div_Py_ssize_t above */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* ImportFrom */
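/* C-level "from module import name": when the attribute lookup fails with
   AttributeError, it is re-raised as the ImportError CPython would produce
   for a missing name. */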
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
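/* C-level hasattr(o, n): a non-string name is a TypeError (returning -1),
   and any error raised by the lookup itself is cleared and reported as
   "attribute absent" (0). */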
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
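/* Pre-3.7 shortcut for PyObject_GenericGetAttr on types without an
   instance dict: the attribute is looked up directly on the type via
   _PyType_Lookup and resolved through the descriptor protocol
   (tp_descr_get); non-string names keep the generic path. */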
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
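/* Stores the C vtable pointer in the type dict, wrapped in a PyCapsule
   (PyCObject before Python 2.7), under the interned name
   __pyx_n_s_pyx_vtable (conventionally "__pyx_vtable__"). */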
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* SetupReduce */
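/* Pickle support: when the extension type defines neither __getstate__ nor
   its own __reduce__/__reduce_ex__, the generated __reduce_cython__ and
   __setstate_cython__ helpers are renamed into place as __reduce__ and
   __setstate__ on the type dict, followed by PyType_Modified(). */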
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto __PYX_BAD;
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto __PYX_BAD;
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* CLineInTraceback */
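/* Decides whether the C source line appears in Python tracebacks by
   consulting the module-level flag cython_runtime.cline_in_traceback,
   caching a False default when the flag is unset, and taking care not to
   disturb any in-flight exception while reading it. */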
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
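/* A cache mapping code_line -> PyCodeObject, kept as an array sorted by
   code_line: lookup and insertion position use binary search, and the
   array grows in blocks of 64 entries. */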
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
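/* Fabricates a PyCodeObject and PyFrameObject describing the generated C
   function so that it shows up as a regular entry in Python tracebacks;
   code objects are memoized in the cache above, keyed on -c_line for C
   lines and on py_line otherwise. */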
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {} /* generated placeholder: no type-specific release handlers were emitted */
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
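/* Contiguity test for a memoryview slice: walks the dimensions
   innermost-first (last axis first for 'C' order, first axis first for
   'F') and requires each stride to equal the running item-block size, with
   no indirect (suboffset) dimensions. */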
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
/* OverlappingSlices */
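/* Overlap test used before slice assignment: each slice is reduced to its
   byte extent [start, end), where negative strides push start downward and
   positive strides push end upward; two slices overlap exactly when the
   half-open intervals intersect. */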
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* CIntFromPyVerify */
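/* Range-check helper for the CIntFromPy conversions below: when narrowing
   from func_type to target_type, a value that does not survive the
   round-trip cast jumps to the raise_overflow / raise_neg_overflow labels
   of the enclosing function, and the _EXC variant first propagates a
   pending error signalled by a -1 return. */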
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntToPy */
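/* C int -> Python int using the narrowest suitable CPython constructor.
   The tautological-looking sizeof comparisons come from instantiating one
   template per C type; the compiler folds the dead branches away. The
   last-resort path serializes the raw bytes via _PyLong_FromByteArray,
   detecting endianness at run time from the address of a local int. */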
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* MemviewDtypeToObject */
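/* Boxing/unboxing of int memoryview elements. Note the setter's return
   convention: 0 signals a pending conversion error, 1 signals success. */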
static CYTHON_INLINE PyObject *__pyx_memview_get_int(const char *itemp) {
return (PyObject *) __Pyx_PyInt_From_int(*(int *) itemp);
}
static CYTHON_INLINE int __pyx_memview_set_int(const char *itemp, PyObject *obj) {
int value = __Pyx_PyInt_As_int(obj);
if ((value == (int)-1) && PyErr_Occurred())
return 0;
*(int *) itemp = value;
return 1;
}
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
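/* Fallback complex arithmetic for builds without C99/C++ complex support.
   The division variant under #if 1 branches on the larger-magnitude
   component and scales before dividing (a Smith-style algorithm), which
   avoids spurious overflow/underflow in the intermediate products. */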
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = (float)(1.0) / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = (float)(1.0) / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_float(a, a);
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2f(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = (double)(1.0) / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = (double)(1.0) / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_double(a, a);
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* MemviewSliceCopyTemplate */
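/* Copies a memoryview slice into a freshly allocated contiguous buffer:
   indirect (suboffset) dimensions are rejected, a shape tuple is built, a
   matching __pyx_array is allocated and wrapped in a new memoryview, and
   the contents are copied; on failure an empty slice (NULL memview and
   data) is returned. */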
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (from_mvs->suboffsets[i] >= 0) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntFromPy */
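/* Python int -> C int: exact small PyLongs (up to four digits) are decoded
   straight from their digit array, larger values go through PyLong_AsLong
   / PyLong_AsUnsignedLong (or, as a last resort, _PyLong_AsByteArray) with
   range checks from __PYX_VERIFY_RETURN_INT, and non-int objects are first
   coerced via __Pyx_PyNumber_IntOrLong. The long and char conversions
   below follow the same template. */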
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
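/* The probe above in isolation (illustrative, compiled out): a little-endian
 * host stores the least significant byte of 0x01020304 first, so u8[0] reads
 * back as 4; a big-endian host would yield 1. */
#if 0
#include <stdint.h>
#include <stdio.h>
int main(void) {
    union { uint32_t u32; uint8_t u8[4]; } s;
    s.u32 = 0x01020304;
    printf("%s-endian\n", s.u8[0] == 4 ? "little" : "big");
    return 0;
}
#endif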
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
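/* Usage sketch for the number parser above (hypothetical driver, compiled
 * out): __Pyx_BufFmt_ParseNumber consumes a decimal run and leaves *ts on
 * the first non-digit; __Pyx_BufFmt_ExpectNumber below additionally raises
 * a Python ValueError when no digit is present. */
#if 0
static void example_parse_count(void) {
    const char *fmt = "128d";    /* struct-module syntax for 128 doubles */
    int count = __Pyx_BufFmt_ParseNumber(&fmt);
    /* count == 128 and fmt now points at "d" */
    (void)count;
}
#endif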
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
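/* Worked example (illustrative, compiled out) of the two struct probes:
 * sizeof(__Pyx_st_double) - sizeof(double) measures the alignment required
 * before a double that follows a char, while sizeof(__Pyx_pad_double) -
 * sizeof(double) measures the tail padding after a double followed by a
 * char. On a typical LP64 ABI both print 8, but nothing guarantees they
 * coincide, hence the separate probe families. */
#if 0
#include <stdio.h>
int main(void) {
    printf("align=%zu pad=%zu\n",
           sizeof(__Pyx_st_double) - sizeof(double),
           sizeof(__Pyx_pad_double) - sizeof(double));
    return 0;
}
#endif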
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue;  /* advance past whitespace before continuing, so the loop terminates */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
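/* Reading guide for the state machine above (illustrative): it checks one
 * struct-module format string against the dtype tree rooted at ctx->root.
 * "d" is one native double, "3i" three native ints (the count folds into
 * enc_count), "Zd" a complex double, and "T{d:x:i:y:}" a struct with fields
 * x and y (the ':name:' spans are skipped by the ':' case). '<', '>' and
 * '!' must match host endianness; '=', '@' and '^' only change the packing
 * mode used for size and alignment decisions. A hypothetical caller,
 * compiled out: */
#if 0
static int example_check_format(void) {
    __Pyx_BufFmt_Context ctx;
    __Pyx_BufFmt_StackElem stack[1];
    __Pyx_BufFmt_Init(&ctx, stack, &__Pyx_TypeInfo_double);
    return __Pyx_BufFmt_CheckString(&ctx, "d") != NULL;  /* 1 on a match */
}
#endif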
/* TypeInfoCompare */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (buf->strides[dim] != sizeof(void *)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (buf->strides[dim] != buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (stride < buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (spec & (__Pyx_MEMVIEW_PTR)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (buf->suboffsets) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1)
{
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i > -1; i--) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
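/* The stride arithmetic the verifier above relies on (illustrative,
 * compiled out): a C-contiguous buffer of shape (rows, cols) with itemsize
 * s has strides (cols*s, s); the loop walks from the last dimension to the
 * first, checking stride == product-of-trailing-extents * itemsize and
 * skipping extent-1 dimensions, where any stride is acceptable. */
#if 0
static int example_is_c_contig_2d(Py_ssize_t rows, Py_ssize_t cols,
                                  Py_ssize_t itemsize,
                                  const Py_ssize_t strides[2]) {
    return (cols <= 1 || strides[1] == itemsize) &&
           (rows <= 1 || strides[0] == cols * itemsize);
}
#endif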
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (buf->ndim != ndim) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned) buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (!__pyx_check_strides(buf, i, ndim, spec))
goto fail;
if (!__pyx_check_suboffsets(buf, i, ndim, spec))
goto fail;
}
if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 2,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
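/* How generated code typically uses the converter above (hypothetical
 * caller, compiled out): a buffer-protocol object such as a 2-D float64
 * NumPy array is validated and wrapped; failure is signalled by a NULL
 * memview with a Python exception set. */
#if 0
static double example_read_element(PyObject *obj, Py_ssize_t i, Py_ssize_t j) {
    __Pyx_memviewslice slice =
        __Pyx_PyObject_to_MemoryviewSlice_dsds_double(obj, 0); /* read-only */
    double v;
    if (!slice.memview) return -1.0;     /* conversion failed, exception set */
    v = *(double *)(slice.data + i * slice.strides[0] + j * slice.strides[1]);
    __PYX_XDEC_MEMVIEW(&slice, 1);       /* drop the acquired reference */
    return v;
}
#endif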
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 1,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 1,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 3,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CStringEquals */
static CYTHON_INLINE int __Pyx_StrEq(const char *s1, const char *s2) {
while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
return *s1 == *s2;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* FunctionImport */
#ifndef __PYX_HAVE_RT_ImportFunction
#define __PYX_HAVE_RT_ImportFunction
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
union {
void (*fp)(void);
void *p;
} tmp;
d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
if (!d)
goto bad;
cobj = PyDict_GetItemString(d, funcname);
if (!cobj) {
PyErr_Format(PyExc_ImportError,
"%.200s does not export expected C function %.200s",
PyModule_GetName(module), funcname);
goto bad;
}
#if PY_VERSION_HEX >= 0x02070000
if (!PyCapsule_IsValid(cobj, sig)) {
PyErr_Format(PyExc_TypeError,
"C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
goto bad;
}
tmp.p = PyCapsule_GetPointer(cobj, sig);
#else
{const char *desc, *s1, *s2;
desc = (const char *)PyCObject_GetDesc(cobj);
if (!desc)
goto bad;
s1 = desc; s2 = sig;
while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
if (*s1 != *s2) {
PyErr_Format(PyExc_TypeError,
"C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), funcname, sig, desc);
goto bad;
}
tmp.p = PyCObject_AsVoidPtr(cobj);}
#endif
*f = tmp.fp;
if (!(*f))
goto bad;
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(d);
return -1;
}
#endif
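/* Usage sketch for __Pyx_ImportFunction (hypothetical module and names,
 * compiled out): Cython modules that export C-level functions publish them
 * in a '__pyx_capi__' dict of capsules, and the signature string must match
 * the one recorded at export time or a TypeError is raised. */
#if 0
static int example_import_capi(void) {
    static double (*f_compute)(double);
    PyObject *mod = PyImport_ImportModule("some_cython_module");
    if (!mod) return -1;
    if (__Pyx_ImportFunction(mod, "compute",
                             (void (**)(void))&f_compute,
                             "double (double)") < 0) {
        Py_DECREF(mod);
        return -1;
    }
    Py_DECREF(mod);
    return 0;
}
#endif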
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
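/* The digit arithmetic above in isolation (illustrative, compiled out):
 * CPython stores longs as base-2^PyLong_SHIFT digits, least significant
 * first (PyLong_SHIFT is typically 30 on 64-bit builds), so a two-digit
 * value reconstructs as (d1 << PyLong_SHIFT) | d0, and a negative Py_SIZE
 * flips the sign. */
#if 0
static Py_ssize_t example_two_digit_value(digit d0, digit d1, int negative) {
    Py_ssize_t v = (Py_ssize_t)((((size_t)d1) << PyLong_SHIFT) | (size_t)d0);
    return negative ? -v : v;
}
#endif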
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
kmp_csupport.c | /*
* kmp_csupport.c -- kfront linkage support for OpenMP.
* $Revision: 43473 $
* $Date: 2014-09-26 15:02:57 -0500 (Fri, 26 Sep 2014) $
*/
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
#include "omp.h" /* extern "C" declarations of user-visible routines */
#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_error.h"
#include "kmp_stats.h"
#define MAX_MESSAGE 512
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
/* flags will be used in future, e.g., to implement */
/* openmp_strict library restrictions */
/*!
* @ingroup STARTUP_SHUTDOWN
* @param loc in source location information
* @param flags in for future use (currently ignored)
*
* Initialize the runtime library. This call is optional; if it is not made then
* it will be implicitly called by attempts to use other library functions.
*
*/
void
__kmpc_begin(ident_t *loc, kmp_int32 flags)
{
// By default __kmp_ignore_mppbeg() returns TRUE.
if (__kmp_ignore_mppbeg() == FALSE) {
__kmp_internal_begin();
KC_TRACE( 10, ("__kmpc_begin: called\n" ) );
}
}
/*!
* @ingroup STARTUP_SHUTDOWN
* @param loc source location information
*
Shut down the runtime library. This is also optional, and even if called will not
* do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero.
*/
void
__kmpc_end(ident_t *loc)
{
// By default, __kmp_ignore_mppend() returns TRUE, which makes the __kmpc_end() call a no-op.
// However, this can be overridden with KMP_IGNORE_MPPEND environment variable.
// If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend() returns FALSE and __kmpc_end()
// will unregister this root (it can cause library shut down).
if (__kmp_ignore_mppend() == FALSE) {
KC_TRACE( 10, ("__kmpc_end: called\n" ) );
KA_TRACE( 30, ("__kmpc_end\n" ));
__kmp_internal_end_thread( -1 );
}
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The global thread index of the active thread.
This function can be called in any context.
If the runtime has only been entered at the outermost level from a
single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is that
which would be returned by @ref omp_get_thread_num() in the outermost
active parallel construct. (Or zero if there is no active parallel
construct, since the master thread is necessarily thread zero).
If multiple non-OpenMP threads all enter an OpenMP construct then this
will be a unique thread identifier among all the threads created by
the OpenMP runtime (but the value cannot be defined in terms of
OpenMP thread ids returned by omp_get_thread_num()).
*/
kmp_int32
__kmpc_global_thread_num(ident_t *loc)
{
kmp_int32 gtid = __kmp_entry_gtid();
KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) );
return gtid;
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads under control of the OpenMP<sup>*</sup> runtime
This function can be called in any context.
It returns the total number of threads under the control of the OpenMP runtime. That is
not a number that can be determined by any OpenMP standard calls, since the library may be
called from more than one non-OpenMP thread, and this reflects the total over all such calls.
Similarly, since the runtime maintains underlying threads even when they are not active (the cost
of creating and destroying OS threads is high), this call counts all such threads even if they are not
waiting for work.
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );
return TCR_4(__kmp_nth);
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );
return __kmp_tid_from_gtid( __kmp_entry_gtid() );
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );
return __kmp_entry_thread() -> th.th_team -> t.t_nproc;
}
/*!
* @ingroup DEPRECATED
* @param loc location description
*
* This function need not be called. It always returns TRUE.
*/
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG
return TRUE;
#else
const char *semi2;
const char *semi3;
int line_no;
if (__kmp_par_range == 0) {
return TRUE;
}
semi2 = loc->psource;
if (semi2 == NULL) {
return TRUE;
}
semi2 = strchr(semi2, ';');
if (semi2 == NULL) {
return TRUE;
}
semi2 = strchr(semi2 + 1, ';');
if (semi2 == NULL) {
return TRUE;
}
if (__kmp_par_range_filename[0]) {
const char *name = semi2 - 1;
while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
name--;
}
if ((*name == '/') || (*name == ';')) {
name++;
}
if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
return __kmp_par_range < 0;
}
}
semi3 = strchr(semi2 + 1, ';');
if (__kmp_par_range_routine[0]) {
if ((semi3 != NULL) && (semi3 > semi2)
&& (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
return __kmp_par_range < 0;
}
}
if ((semi3 != NULL) && (sscanf(semi3 + 1, "%d", &line_no) == 1)) {  /* semi3 may be NULL if no third ';' was found */
if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
return __kmp_par_range > 0;
}
return __kmp_par_range < 0;
}
return TRUE;
#endif /* KMP_DEBUG */
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return 1 if this thread is executing inside an active parallel region, zero if not.
*/
kmp_int32
__kmpc_in_parallel( ident_t *loc )
{
return __kmp_entry_thread() -> th.th_root -> r.r_active;
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_threads number of threads requested for this parallel construct
Set the number of threads to be used by the next fork spawned by this thread.
This call is only required if the parallel construct has a `num_threads` clause.
*/
void
__kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads )
{
KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
global_tid, num_threads ) );
__kmp_push_num_threads( loc, global_tid, num_threads );
}
void
__kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid )
{
KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) );
/* the num_threads are automatically popped */
}
#if OMP_40_ENABLED
void
__kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind )
{
KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n",
global_tid, proc_bind ) );
__kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind );
}
#endif /* OMP_40_ENABLED */
/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined parallel construct
@param ... pointers to shared variables that aren't global
Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
KMP_STOP_EXPLICIT_TIMER(OMP_serial);
KMP_COUNT_BLOCK(OMP_PARALLEL);
int gtid = __kmp_entry_gtid();
// maybe to save thr_state is enough here
{
va_list ap;
va_start( ap, microtask );
#if INCLUDE_SSC_MARKS
SSC_MARK_FORKING();
#endif
__kmp_fork_call( loc, gtid, fork_context_intel,
argc,
VOLATILE_CAST(microtask_t) microtask,
VOLATILE_CAST(launch_t) __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV) && KMP_OS_LINUX
&ap
#else
ap
#endif
);
#if INCLUDE_SSC_MARKS
SSC_MARK_JOINING();
#endif
__kmp_join_call( loc, gtid );
va_end( ap );
}
KMP_START_EXPLICIT_TIMER(OMP_serial);
}
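/* Illustrative sketch (hypothetical names, compiled out): what a compiler
 * typically emits for
 *     #pragma omp parallel shared(a)
 *     { a[omp_get_thread_num()] = 1; }
 * namely an outlined microtask plus a single __kmpc_fork_call; the exact
 * ident_t contents vary by compiler. */
#if 0
static void outlined(kmp_int32 *gtid, kmp_int32 *btid, int *a) {
    (void)gtid; (void)btid;
    a[omp_get_thread_num()] = 1;              /* body of the parallel region */
}
static void caller(int *a) {
    static ident_t loc = { 0, KMP_IDENT_KMPC, 0, 0, ";file;func;1;1;;" };
    __kmpc_fork_call(&loc, 1, (kmpc_micro)outlined, a);
}
#endif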
#if OMP_40_ENABLED
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_teams number of teams requested for the teams construct
Set the number of teams to be used by the teams construct.
This call is only required if the teams construct has a `num_teams` clause
or a `thread_limit` clause (or both).
*/
void
__kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads )
{
KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
global_tid, num_teams, num_threads ) );
__kmp_push_num_teams( loc, global_tid, num_teams, num_threads );
}
/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined teams construct
@param ... pointers to shared variables that aren't global
Do the actual fork and call the microtask in the relevant number of threads.
*/
void
__kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
int gtid = __kmp_entry_gtid();
kmp_info_t *this_thr = __kmp_threads[ gtid ];
va_list ap;
va_start( ap, microtask );
// remember teams entry point and nesting level
this_thr->th.th_teams_microtask = microtask;
this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host
// check if __kmpc_push_num_teams called, set default number of teams otherwise
if ( this_thr->th.th_teams_size.nteams == 0 ) {
__kmp_push_num_teams( loc, gtid, 0, 0 );
}
KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1);
KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);
__kmp_fork_call( loc, gtid, fork_context_intel,
argc,
VOLATILE_CAST(microtask_t) __kmp_teams_master,
VOLATILE_CAST(launch_t) __kmp_invoke_teams_master,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV) && KMP_OS_LINUX
&ap
#else
ap
#endif
);
__kmp_join_call( loc, gtid );
this_thr->th.th_teams_microtask = NULL;
this_thr->th.th_teams_level = 0;
*(kmp_int64*)(&this_thr->th.th_teams_size) = 0L;
va_end( ap );
}
#endif /* OMP_40_ENABLED */
//
// I don't think this function should ever have been exported.
// The __kmpc_ prefix was misapplied. I'm fairly certain that no generated
// openmp code ever called it, but it's been exported from the RTL for so
// long that I'm afraid to remove the definition.
//
int
__kmpc_invoke_task_func( int gtid )
{
return __kmp_invoke_task_func( gtid );
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
Enter a serialized parallel construct. This interface is used to handle a
conditional parallel region, like this,
@code
#pragma omp parallel if (condition)
@endcode
when the condition is false.
*/
void
__kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
__kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with
* kmp_fork_call since the tasks to be done are similar in each case.
*/
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
Leave a serialized parallel construct.
*/
void
__kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
kmp_internal_control_t *top;
kmp_info_t *this_thr;
kmp_team_t *serial_team;
KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) );
/* skip all this code for autopar serialized loops since it results in
unacceptable overhead */
if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) )
return;
// Not autopar code
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
this_thr = __kmp_threads[ global_tid ];
serial_team = this_thr->th.th_serial_team;
KMP_MB();
KMP_DEBUG_ASSERT( serial_team );
KMP_ASSERT( serial_team -> t.t_serialized );
KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team );
KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team );
KMP_DEBUG_ASSERT( serial_team -> t.t_threads );
KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr );
/* If necessary, pop the internal control stack values and replace the team values */
top = serial_team -> t.t_control_stack_top;
if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) {
copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top );
serial_team -> t.t_control_stack_top = top -> next;
__kmp_free(top);
}
//if( serial_team -> t.t_serialized > 1 )
serial_team -> t.t_level--;
/* pop dispatch buffers stack */
KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer);
{
dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer;
serial_team->t.t_dispatch->th_disp_buffer =
serial_team->t.t_dispatch->th_disp_buffer->next;
__kmp_free( disp_buffer );
}
-- serial_team -> t.t_serialized;
if ( serial_team -> t.t_serialized == 0 ) {
/* return to the parallel section */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) {
__kmp_clear_x87_fpu_status_word();
__kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word );
__kmp_load_mxcsr( &serial_team->t.t_mxcsr );
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
this_thr -> th.th_team = serial_team -> t.t_parent;
this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid;
/* restore values cached in the thread */
this_thr -> th.th_team_nproc = serial_team -> t.t_parent -> t.t_nproc; /* JPH */
this_thr -> th.th_team_master = serial_team -> t.t_parent -> t.t_threads[0]; /* JPH */
this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized;
/* TODO the below shouldn't need to be adjusted for serialized teams */
this_thr -> th.th_dispatch = & this_thr -> th.th_team ->
t.t_dispatch[ serial_team -> t.t_master_tid ];
__kmp_pop_current_task_from_thread( this_thr );
KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 );
this_thr -> th.th_current_task -> td_flags.executing = 1;
if ( __kmp_tasking_mode != tskm_immediate_exec ) {
//
// Copy the task team from the new child / old parent team
// to the thread. If non-NULL, copy the state flag also.
//
if ( ( this_thr -> th.th_task_team = this_thr -> th.th_team -> t.t_task_team ) != NULL ) {
this_thr -> th.th_task_state = this_thr -> th.th_task_team -> tt.tt_state;
}
KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n",
global_tid, this_thr -> th.th_task_team, this_thr -> th.th_team ) );
}
} else {
if ( __kmp_tasking_mode != tskm_immediate_exec ) {
KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n",
global_tid, serial_team, serial_team -> t.t_serialized ) );
}
}
#if USE_ITT_BUILD
kmp_uint64 cur_time = 0;
#if USE_ITT_NOTIFY
if( __itt_get_timestamp_ptr ) {
cur_time = __itt_get_timestamp();
}
#endif /* USE_ITT_NOTIFY */
// Report the barrier
if( ( __kmp_forkjoin_frames_mode == 1 || __kmp_forkjoin_frames_mode == 3 ) && __itt_frame_submit_v3_ptr ) {
if( this_thr->th.th_team->t.t_level == 0 ) {
__kmp_itt_frame_submit( global_tid, this_thr->th.th_frame_time_serialized, cur_time, 0, loc, this_thr->th.th_team_nproc, 0 );
}
}
// Mark the end of the "parallel" region for VTune. Only one frame notification scheme is used at the moment.
if ( ( __itt_frame_end_v3_ptr && __kmp_forkjoin_frames && ! __kmp_forkjoin_frames_mode ) || KMP_ITT_DEBUG )
{
this_thr->th.th_ident = loc;
__kmp_itt_region_joined( global_tid, 1 );
}
if ( ( __itt_frame_submit_v3_ptr && __kmp_forkjoin_frames_mode == 3 ) || KMP_ITT_DEBUG )
{
this_thr->th.th_ident = loc;
// Since the barrier frame for a serialized region is equal to the region itself, we use the same begin timestamp as for the barrier.
__kmp_itt_frame_submit( global_tid, serial_team->t.t_region_time, cur_time, 0, loc, this_thr->th.th_team_nproc, 2 );
}
#endif /* USE_ITT_BUILD */
if ( __kmp_env_consistency_check )
__kmp_pop_parallel( global_tid, NULL );
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information.
@param ... pointers to the variables to be synchronized.
Execute <tt>flush</tt>. The pointers to the variables to be flushed
need not actually be passed, (indeed unless this is a zero terminated
list they can't be since there's no count here so we don't know how
many there are!). This is implemented as a full memory fence. (Though
depending on the memory ordering convention obeyed by the compiler
even that may not be necessary).
*/
void
__kmpc_flush(ident_t *loc, ...)
{
KC_TRACE( 10, ("__kmpc_flush: called\n" ) );
/* need an explicit memory fence here, since the library uses volatile accesses instead */
KMP_MB(); /* Flush all pending memory write invalidates. */
#if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
#if KMP_MIC
// fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
// We shouldn't need it, though, since the ABI rules require that
// * If the compiler generates NGO stores it also generates the fence
// * If users hand-code NGO stores they should insert the fence
// therefore no incomplete unordered stores should be visible.
#else
// C74404
// This is to address non-temporal store instructions (sfence needed).
// The clflush instruction also needs to be addressed (mfence needed).
// Probably the non-temporal load movntdqa instruction should also be addressed.
// mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2.
if ( ! __kmp_cpuinfo.initialized ) {
__kmp_query_cpuid( & __kmp_cpuinfo );
}; // if
if ( ! __kmp_cpuinfo.sse2 ) {
// CPU cannot execute SSE2 instructions.
} else {
#if KMP_COMPILER_ICC || KMP_COMPILER_MSVC
_mm_mfence();
#else
__sync_synchronize();
#endif // KMP_COMPILER_ICC
}; // if
#endif // KMP_MIC
#elif KMP_ARCH_ARM
// Nothing yet
#elif KMP_ARCH_PPC64
// Nothing needed here (we have a real MB above).
#if KMP_OS_CNK
// The flushing thread needs to yield here; this prevents a
// busy-waiting thread from saturating the pipeline. flush is
// often used in loops like this:
// while (!flag) {
// #pragma omp flush(flag)
// }
// and adding the yield here is good for at least a 10x speedup
// when running >2 threads per core (on the NAS LU benchmark).
__kmp_yield(TRUE);
#endif
#elif KMP_ARCH_RISCV
// Nothing yet : TODO; add fence
#else
#error Unknown or unsupported architecture
#endif
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
Execute a barrier.
*/
void
__kmpc_barrier(ident_t *loc, kmp_int32 global_tid)
{
KMP_COUNT_BLOCK(OMP_BARRIER);
KMP_TIME_BLOCK(OMP_barrier);
int explicit_barrier_flag;
KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) );
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
if ( __kmp_env_consistency_check ) {
if ( loc == 0 ) {
KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
}; // if
__kmp_check_barrier( global_tid, ct_barrier, loc );
}
__kmp_threads[ global_tid ]->th.th_ident = loc;
// TODO: explicit barrier_wait_id:
// this function is called when the 'barrier' directive is present or
// at the implicit barrier at the end of a worksharing construct.
// 1) better to add a per-thread barrier counter to a thread data structure
// 2) set it to 0 when a new team is created
// 3) no sync is required
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
}
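/* Illustrative sketch (assumed lowering, not part of the original file):
 *     #pragma omp barrier
 * becomes, approximately,
 *     __kmpc_barrier(&loc_struct, __kmpc_global_thread_num(&loc_struct));
 * where __kmpc_global_thread_num() is the runtime entry point returning the
 * caller's global thread id; compilers usually reuse the gtid argument of the
 * enclosing outlined function instead of calling it again. */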
/* The BARRIER for a MASTER section is always explicit */
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise.
*/
kmp_int32
__kmpc_master(ident_t *loc, kmp_int32 global_tid)
{
KMP_COUNT_BLOCK(OMP_MASTER);
int status = 0;
KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) );
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
if( KMP_MASTER_GTID( global_tid ))
status = 1;
if ( __kmp_env_consistency_check ) {
if (status)
__kmp_push_sync( global_tid, ct_master, loc, NULL );
else
__kmp_check_sync( global_tid, ct_master, loc, NULL );
}
return status;
}
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
Mark the end of a <tt>master</tt> region. This should only be called by the thread
that executes the <tt>master</tt> region.
*/
void
__kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
{
KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );
KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));
if ( __kmp_env_consistency_check ) {
if( global_tid < 0 )
KMP_WARNING( ThreadIdentInvalid );
if( KMP_MASTER_GTID( global_tid ))
__kmp_pop_sync( global_tid, ct_master, loc );
}
}
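/* Illustrative sketch (assumed lowering, not part of the original file):
 *     #pragma omp master
 *     { body(); }
 * becomes, approximately,
 *     if (__kmpc_master(&loc_struct, gtid)) {
 *         body();
 *         __kmpc_end_master(&loc_struct, gtid);
 *     }
 * Only the thread for which __kmpc_master() returns 1 executes the body, and
 * only that thread calls __kmpc_end_master(). */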
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
Start execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_ordered( ident_t * loc, kmp_int32 gtid )
{
int cid = 0;
kmp_info_t *th;
KMP_DEBUG_ASSERT( __kmp_init_serial );
KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
#if USE_ITT_BUILD
__kmp_itt_ordered_prep( gtid );
// TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */
th = __kmp_threads[ gtid ];
if ( th -> th.th_dispatch -> th_deo_fcn != 0 )
(*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc );
else
__kmp_parallel_deo( & gtid, & cid, loc );
#if USE_ITT_BUILD
__kmp_itt_ordered_start( gtid );
#endif /* USE_ITT_BUILD */
}
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
End execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
{
int cid = 0;
kmp_info_t *th;
KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) );
#if USE_ITT_BUILD
__kmp_itt_ordered_end( gtid );
// TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */
th = __kmp_threads[ gtid ];
if ( th -> th.th_dispatch -> th_dxo_fcn != 0 )
(*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc );
else
__kmp_parallel_dxo( & gtid, & cid, loc );
}
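/* Illustrative sketch (assumed lowering, not part of the original file):
 * inside a loop with an ordered clause,
 *     #pragma omp ordered
 *     { body(); }
 * becomes, approximately,
 *     __kmpc_ordered(&loc_struct, gtid);
 *     body();
 *     __kmpc_end_ordered(&loc_struct, gtid);
 * The th_deo_fcn/th_dxo_fcn dispatch hooks used above let each schedule kind
 * supply its own enter/exit ordering protocol. */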
static kmp_user_lock_p
__kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid )
{
kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit;
//
// Because of the double-check, the following load
// doesn't need to be volatile.
//
kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
if ( lck == NULL ) {
void * idx;
// Allocate & initialize the lock.
// Remember allocated locks in table in order to free them in __kmp_cleanup()
lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section );
__kmp_init_user_lock_with_checks( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_critical_creating( lck );
// __kmp_itt_critical_creating() should be called *before* the first usage of the underlying
// lock. It is the only place where we can guarantee it. There is a chance the lock will be
// destroyed without ever being used, but that is not a problem, because this is not a real
// event seen by the user but rather the setting of a name for the object (lock). See more
// details in kmp_itt.h.
#endif /* USE_ITT_BUILD */
//
// Use a cmpxchg instruction to slam the start of the critical
// section with the lock pointer. If another thread beat us
// to it, deallocate the lock, and use the lock that the other
// thread allocated.
//
int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck );
if ( status == 0 ) {
// Deallocate the lock and reload the value.
#if USE_ITT_BUILD
__kmp_itt_critical_destroyed( lck );
// Let ITT know the lock is destroyed and the same memory location may be reused for
// another purpose.
#endif /* USE_ITT_BUILD */
__kmp_destroy_user_lock_with_checks( lck );
__kmp_user_lock_free( &idx, gtid, lck );
lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
KMP_DEBUG_ASSERT( lck != NULL );
}
}
return lck;
}
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.
Enter code protected by a `critical` construct.
This function blocks until the executing thread can enter the critical section.
*/
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
KMP_COUNT_BLOCK(OMP_CRITICAL);
kmp_user_lock_p lck;
KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );
//TODO: add THR_OVHD_STATE
KMP_CHECK_USER_LOCK_INIT();
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#endif
else { // ticket, queuing or drdpa
lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
}
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_critical, loc, lck );
/* since the critical directive binds to all threads, not just the current
* team, we have to check this even if we are in a serialized team */
/* also, even if we are the uber thread, we still have to acquire the lock,
* as we have to contend with sibling threads */
#if USE_ITT_BUILD
__kmp_itt_critical_acquiring( lck );
#endif /* USE_ITT_BUILD */
// Value of 'crit' should be good for using as a critical_id of the critical section directive.
__kmp_acquire_user_lock_with_checks( lck, global_tid );
#if USE_ITT_BUILD
__kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */
KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
} // __kmpc_critical
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.
Leave a critical section, releasing any lock that was held during its execution.
*/
void
__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
{
kmp_user_lock_p lck;
KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#endif
else { // ticket, queuing or drdpa
lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit));
}
KMP_ASSERT(lck != NULL);
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_critical, loc );
#if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
#endif /* USE_ITT_BUILD */
// Value of 'crit' should be good for using as a critical_id of the critical section directive.
__kmp_release_user_lock_with_checks( lck, global_tid );
KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
}
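/* Illustrative sketch (assumed lowering, not part of the original file):
 *     #pragma omp critical(name)
 *     { body(); }
 * becomes, approximately,
 *     static kmp_critical_name crit_name;   // one per named critical section
 *     __kmpc_critical(&loc_struct, gtid, &crit_name);
 *     body();
 *     __kmpc_end_critical(&loc_struct, gtid, &crit_name);
 * For the small lock kinds (tas/futex) the lock is stored directly inside
 * crit_name; otherwise crit_name holds a pointer to a lazily allocated lock,
 * as handled by __kmp_get_critical_section_ptr() above. */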
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise
Start execution of a combined barrier and master. The barrier is executed inside this function.
*/
kmp_int32
__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
int status;
KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
if ( __kmp_env_consistency_check )
__kmp_check_barrier( global_tid, ct_barrier, loc );
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );
return (status != 0) ? 0 : 1;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
Complete the execution of a combined barrier and master. This function should
only be called at the completion of the <tt>master</tt> code. Other threads will
still be waiting at the barrier and this call releases them.
*/
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));
__kmp_end_split_barrier ( bs_plain_barrier, global_tid );
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise
Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the worker threads do not wait
for the master code to complete; the consistency-check actions normally
performed by <tt>__kmpc_end_master</tt> are done inside this function instead.
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
kmp_int32 ret;
KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
if ( __kmp_env_consistency_check ) {
if ( loc == 0 ) {
KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
}
__kmp_check_barrier( global_tid, ct_barrier, loc );
}
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
ret = __kmpc_master (loc, global_tid);
if ( __kmp_env_consistency_check ) {
/* there is no __kmpc_end_master call here, so the consistency-check */
/* actions of __kmpc_end_master are performed here instead */
if ( global_tid < 0 ) {
KMP_WARNING( ThreadIdentInvalid );
}
if (ret) {
/* only one thread should do the pop since only */
/* one did the push (see __kmpc_master()) */
__kmp_pop_sync( global_tid, ct_master, loc );
}
}
return (ret);
}
/* The BARRIER for a SINGLE process section is always explicit */
/*!
@ingroup WORK_SHARING
@param loc source location information
@param global_tid global thread number
@return One if this thread should execute the single construct, zero otherwise.
Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in either of the two "single" calls; rather, the
compiler should introduce an explicit barrier if one is required.
*/
kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
KMP_COUNT_BLOCK(OMP_SINGLE);
kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );
return rc;
}
/*!
@ingroup WORK_SHARING
@param loc source location information
@param global_tid global thread number
Mark the end of a <tt>single</tt> construct. This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
__kmp_exit_single( global_tid );
}
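/* Illustrative sketch (assumed lowering, not part of the original file):
 *     #pragma omp single
 *     { body(); }
 * becomes, approximately,
 *     if (__kmpc_single(&loc_struct, gtid)) {
 *         body();
 *         __kmpc_end_single(&loc_struct, gtid);
 *     }
 *     __kmpc_barrier(&loc_struct, gtid);   // omitted when 'nowait' is given
 * As documented above, the barrier is not implicit in the single calls
 * themselves; the compiler emits it separately. */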
/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id
Mark the end of a statically scheduled loop.
*/
void
__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
{
KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));
if ( __kmp_env_consistency_check )
__kmp_pop_workshare( global_tid, ct_pdo, loc );
}
/*
* User routines which take C-style arguments (call by value),
* unlike the equivalent Fortran routines
*/
void
ompc_set_num_threads( int arg )
{
// !!!!! TODO: check the per-task binding
__kmp_set_num_threads( arg, __kmp_entry_gtid() );
}
void
ompc_set_dynamic( int flag )
{
kmp_info_t *thread;
/* For the thread-private implementation of the internal controls */
thread = __kmp_entry_thread();
__kmp_save_internal_controls( thread );
set__dynamic( thread, flag ? TRUE : FALSE );
}
void
ompc_set_nested( int flag )
{
kmp_info_t *thread;
/* For the thread-private internal controls implementation */
thread = __kmp_entry_thread();
__kmp_save_internal_controls( thread );
set__nested( thread, flag ? TRUE : FALSE );
}
void
ompc_set_max_active_levels( int max_active_levels )
{
/* TO DO */
/* we want per-task implementation of this internal control */
/* For the per-thread internal controls implementation */
__kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels );
}
void
ompc_set_schedule( omp_sched_t kind, int modifier )
{
// !!!!! TODO: check the per-task binding
__kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier );
}
int
ompc_get_ancestor_thread_num( int level )
{
return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level );
}
int
ompc_get_team_size( int level )
{
return __kmp_get_team_size( __kmp_entry_gtid(), level );
}
void
kmpc_set_stacksize( int arg )
{
// __kmp_aux_set_stacksize initializes the library if needed
__kmp_aux_set_stacksize( arg );
}
void
kmpc_set_stacksize_s( size_t arg )
{
// __kmp_aux_set_stacksize initializes the library if needed
__kmp_aux_set_stacksize( arg );
}
void
kmpc_set_blocktime( int arg )
{
int gtid, tid;
kmp_info_t *thread;
gtid = __kmp_entry_gtid();
tid = __kmp_tid_from_gtid(gtid);
thread = __kmp_thread_from_gtid(gtid);
__kmp_aux_set_blocktime( arg, thread, tid );
}
void
kmpc_set_library( int arg )
{
// __kmp_user_set_library initializes the library if needed
__kmp_user_set_library( (enum library_type)arg );
}
void
kmpc_set_defaults( char const * str )
{
// __kmp_aux_set_defaults initializes the library if needed
__kmp_aux_set_defaults( str, strlen( str ) );
}
int
kmpc_set_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
return -1;
#else
if ( ! TCR_4(__kmp_init_middle) ) {
__kmp_middle_initialize();
}
return __kmp_aux_set_affinity_mask_proc( proc, mask );
#endif
}
int
kmpc_unset_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
return -1;
#else
if ( ! TCR_4(__kmp_init_middle) ) {
__kmp_middle_initialize();
}
return __kmp_aux_unset_affinity_mask_proc( proc, mask );
#endif
}
int
kmpc_get_affinity_mask_proc( int proc, void **mask )
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
return -1;
#else
if ( ! TCR_4(__kmp_init_middle) ) {
__kmp_middle_initialize();
}
return __kmp_aux_get_affinity_mask_proc( proc, mask );
#endif
}
/* -------------------------------------------------------------------------- */
/*!
@ingroup THREADPRIVATE
@param loc source location information
@param gtid global thread number
@param cpy_size size of the cpy_data buffer
@param cpy_data pointer to data to be copied
@param cpy_func helper function to call for copying data
@param didit flag variable: 1=single thread; 0=not single thread
__kmpc_copyprivate implements the interface for the private data broadcast needed for
the copyprivate clause associated with a single region in an OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt> variable set to 1
and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.
The OpenMP specification forbids the use of nowait on the single region when a copyprivate
clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid
race conditions, so the code generation for the single region should avoid generating a barrier
after the call to @ref __kmpc_copyprivate.
The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.
Internal implementation: the single thread first copies its descriptor address (cpy_data)
to a team-private location; each of the other threads then calls the function pointed to by
the cpy_func parameter, which carries out the copy using the cpy_data buffers.
The cpy_func routine used for the copy and the contents of the data area defined by cpy_data
and cpy_size may be built in any fashion that allows the copy to be done. For instance,
the cpy_data buffer can hold the actual data to be copied, or it may hold a list of pointers
to the data. The cpy_func routine must interpret the cpy_data buffer appropriately.
The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied to
and void *source is the cpy_data pointer for the thread being copied from.
*/
void
__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit )
{
void **data_ptr;
KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));
KMP_MB();
data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;
if ( __kmp_env_consistency_check ) {
if ( loc == 0 ) {
KMP_WARNING( ConstructIdentInvalid );
}
}
/* ToDo: Optimize the following two barriers into some kind of split barrier */
if (didit) *data_ptr = cpy_data;
/* This barrier is not a barrier region boundary */
#if USE_ITT_NOTIFY
__kmp_threads[gtid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
if (! didit) (*cpy_func)( cpy_data, *data_ptr );
/* Consider next barrier the user-visible barrier for barrier region boundaries */
/* Nesting checks are already handled by the single construct checks */
#if USE_ITT_NOTIFY
__kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. tasks can overwrite the location)
#endif
__kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
}
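/* Illustrative sketch (assumed code generation, not part of the original
 * file): for
 *     #pragma omp single copyprivate(x)
 *     { x = compute(); }
 * the compiler might emit, approximately,
 *     static void cpy_func(void *dst, void *src) {   // compiler-generated
 *         *(int *)dst = *(int *)src;                 // copy x across threads
 *     }
 *     ...
 *     kmp_int32 didit = __kmpc_single(&loc_struct, gtid);
 *     if (didit) { x = compute(); __kmpc_end_single(&loc_struct, gtid); }
 *     __kmpc_copyprivate(&loc_struct, gtid, sizeof(int), &x, cpy_func, didit);
 * No extra barrier is emitted after the single region, since
 * __kmpc_copyprivate() performs the required barriers internally. */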
/* -------------------------------------------------------------------------- */
#define INIT_LOCK __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK __kmp_destroy_nested_user_lock_with_checks
/*
* TODO: Make check abort messages use location info & pass it
* into with_checks routines
*/
/* initialize the lock */
void
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
static char const * const func = "omp_init_lock";
kmp_user_lock_p lck;
KMP_DEBUG_ASSERT( __kmp_init_serial );
if ( __kmp_env_consistency_check ) {
if ( user_lock == NULL ) {
KMP_FATAL( LockIsUninitialized, func );
}
}
KMP_CHECK_USER_LOCK_INIT();
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
}
INIT_LOCK( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
} // __kmpc_init_lock
/* initialize the lock */
void
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
static char const * const func = "omp_init_nest_lock";
kmp_user_lock_p lck;
KMP_DEBUG_ASSERT( __kmp_init_serial );
if ( __kmp_env_consistency_check ) {
if ( user_lock == NULL ) {
KMP_FATAL( LockIsUninitialized, func );
}
}
KMP_CHECK_USER_LOCK_INIT();
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
}
INIT_NESTED_LOCK( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
} // __kmpc_init_nest_lock
void
__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
kmp_user_lock_p lck;
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
DESTROY_LOCK( lck );
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
;
}
#endif
else {
__kmp_user_lock_free( user_lock, gtid, lck );
}
} // __kmpc_destroy_lock
/* destroy the lock */
void
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
kmp_user_lock_p lck;
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
DESTROY_NESTED_LOCK( lck );
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
;
}
#endif
else {
__kmp_user_lock_free( user_lock, gtid, lck );
}
} // __kmpc_destroy_nest_lock
void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
KMP_COUNT_BLOCK(OMP_set_lock);
kmp_user_lock_p lck;
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
ACQUIRE_LOCK( lck, gtid );
#if USE_ITT_BUILD
__kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
}
void
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
kmp_user_lock_p lck;
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
ACQUIRE_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
__kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
}
void
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
kmp_user_lock_p lck;
/* Can't use serial interval since not block structured */
/* release the lock */
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
// "fast" path implemented to fix customer performance issue
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
KMP_MB();
return;
#else
lck = (kmp_user_lock_p)user_lock;
#endif
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */
RELEASE_LOCK( lck, gtid );
}
/* release the lock */
void
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
kmp_user_lock_p lck;
/* Can't use serial interval since not block structured */
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
// "fast" path implemented to fix customer performance issue
kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock;
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
if ( --(tl->lk.depth_locked) == 0 ) {
TCW_4(tl->lk.poll, 0);
}
KMP_MB();
return;
#else
lck = (kmp_user_lock_p)user_lock;
#endif
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */
RELEASE_NESTED_LOCK( lck, gtid );
}
/* try to acquire the lock */
int
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
KMP_COUNT_BLOCK(OMP_test_lock);
KMP_TIME_BLOCK(OMP_test_lock);
kmp_user_lock_p lck;
int rc;
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
rc = TEST_LOCK( lck, gtid );
#if USE_ITT_BUILD
if ( rc ) {
__kmp_itt_lock_acquired( lck );
} else {
__kmp_itt_lock_cancelled( lck );
}
#endif /* USE_ITT_BUILD */
return ( rc ? FTN_TRUE : FTN_FALSE );
/* Can't use serial interval since not block structured */
}
/* try to acquire the lock */
int
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
kmp_user_lock_p lck;
int rc;
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_RISCV)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
rc = TEST_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
if ( rc ) {
__kmp_itt_lock_acquired( lck );
} else {
__kmp_itt_lock_cancelled( lck );
}
#endif /* USE_ITT_BUILD */
return rc;
/* Can't use serial interval since not block structured */
}
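/* Illustrative mapping (assumed, not part of the original file): the
 * user-level OpenMP lock API corresponds to these entry points roughly as
 *     omp_lock_t l;
 *     omp_init_lock(&l);     // -> __kmpc_init_lock(loc, gtid, (void **)&l)
 *     omp_set_lock(&l);      // -> __kmpc_set_lock(loc, gtid, (void **)&l)
 *     omp_unset_lock(&l);    // -> __kmpc_unset_lock(loc, gtid, (void **)&l)
 *     omp_destroy_lock(&l);  // -> __kmpc_destroy_lock(loc, gtid, (void **)&l)
 * When the selected lock kind fits in the omp_lock_t storage (tas/futex), the
 * user_lock pointer is used directly; otherwise it holds the handle of a lock
 * allocated in the runtime's lock table, as the branches above show. */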
/*--------------------------------------------------------------------------------------------------------------------*/
/*
* Interface to fast scalable reduce methods routines
*/
// Keep the selected method in a thread-local structure for cross-function usage: it will be used in the __kmpc_end_reduce* functions.
// Another solution: re-determine the method once more in the __kmpc_end_reduce* functions (a new prototype would be required then).
// AT: which solution is better?
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )
#define __KMP_GET_REDUCTION_METHOD(gtid) \
( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )
// description of the packed_reduction_method variable: look at the macros in kmp.h
// used in a critical section reduce block
static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
// this lock was visible to a customer and to the thread profiler as a serial overhead span
// (although it's used for an internal purpose only)
// why was it visible in previous implementation?
// should we keep it visible in new reduce block?
kmp_user_lock_p lck;
// We know that the fast reduction code is only emitted by Intel compilers
// with 32 byte critical sections. If there isn't enough space, then we
// have to use a pointer.
if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
lck = (kmp_user_lock_p)crit;
}
else {
lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
}
KMP_DEBUG_ASSERT( lck != NULL );
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_critical, loc, lck );
__kmp_acquire_user_lock_with_checks( lck, global_tid );
}
// used in a critical section reduce block
static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
kmp_user_lock_p lck;
// We know that the fast reduction code is only emitted by Intel compilers with 32 byte critical
// sections. If there isn't enough space, then we have to use a pointer.
if ( __kmp_base_user_lock_size > 32 ) {
lck = *( (kmp_user_lock_p *) crit );
KMP_ASSERT( lck != NULL );
} else {
lck = (kmp_user_lock_p) crit;
}
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_critical, loc );
__kmp_release_user_lock_with_checks( lck, global_tid );
} // __kmp_end_critical_section_reduce_block
/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed
The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck ) {
KMP_COUNT_BLOCK(REDUCE_nowait);
int retval;
PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
kmp_team_t *team;
kmp_info_t *th;
int teams_swapped = 0, task_state;
#endif
KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );
// why do we need this initialization here at all?
// Reduction clause cannot be used as a stand-alone directive.
// do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
// possible detection of false-positive race by the threadchecker ???
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
// check correctness of reduce block nesting
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#if OMP_40_ENABLED
th = __kmp_thread_from_gtid(global_tid);
if( th->th.th_teams_microtask ) { // AC: check if we are inside the teams construct?
team = th->th.th_team;
if( team->t.t_level == th->th.th_teams_level ) {
// this is reduction at teams construct
KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid); // AC: check that tid == 0
// Let's swap teams temporarily for the reduction barrier
teams_swapped = 1;
th->th.th_info.ds.ds_tid = team->t.t_master_tid;
th->th.th_team = team->t.t_parent;
th->th.th_task_team = th->th.th_team->t.t_task_team;
th->th.th_team_nproc = th->th.th_team->t.t_nproc;
task_state = th->th.th_task_state;
if( th->th.th_task_team )
th->th.th_task_state = th->th.th_task_team->tt.tt_state;
}
}
#endif // OMP_40_ENABLED
// The packed_reduction_method value will be reused by the __kmp_end_reduce* function; the value should be kept in a variable.
// The variable should be either a construct-specific or a thread-specific property, not a team-specific property
// (a thread can reach the next reduce block on the next construct, and the reduce method may differ on the next construct).
// An ident_t "loc" parameter could be used as a construct-specific property (but what if loc == 0?)
// (if both construct-specific and team-specific variables were shared, then unnecessary extra syncs would be needed).
// A thread-specific variable is better regarding the two issues above (next construct and extra syncs);
// a thread-specific "th_local.reduction_method" variable is used currently.
// Each thread executes the 'determine' and 'set' lines (no need to restrict them to one thread, which would require extra syncs).
packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
__KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );
if( packed_reduction_method == critical_reduce_block ) {
__kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
retval = 1;
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
retval = 1;
} else if( packed_reduction_method == atomic_reduce_block ) {
retval = 2;
// all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen)
// (it's not quite good, because the checking block has been closed by this 'pop',
// but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction)
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_reduce, loc );
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
//AT: performance issue: a real barrier here
//AT: (if master goes slow, other threads are blocked here waiting for the master to come and release them)
//AT: (it's not what a customer might expect specifying NOWAIT clause)
//AT: (specifying NOWAIT won't result in improvement of performance, it'll be confusing to a customer)
//AT: another implementation of *barrier_gather*nowait() (or some other design) might go faster
// and be more in line with sense of NOWAIT
//AT: TO DO: do epcc test and compare times
// this barrier should be invisible to a customer and to the thread profiler
// (it's neither a terminating barrier nor customer's code, it's used for an internal purpose)
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
retval = ( retval != 0 ) ? ( 0 ) : ( 1 );
// all other workers except master should do this pop here
// ( none of other workers will get to __kmpc_end_reduce_nowait() )
if ( __kmp_env_consistency_check ) {
if( retval == 0 ) {
__kmp_pop_sync( global_tid, ct_reduce, loc );
}
}
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
#if OMP_40_ENABLED
if( teams_swapped ) {
// Restore thread structure
th->th.th_info.ds.ds_tid = 0;
th->th.th_team = team;
th->th.th_task_team = team->t.t_task_team;
th->th.th_team_nproc = team->t.t_nproc;
th->th.th_task_state = task_state;
}
#endif
KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );
return retval;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure
Finish the execution of a reduce nowait.
*/
void
__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );
packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );
if( packed_reduction_method == critical_reduce_block ) {
__kmp_end_critical_section_reduce_block( loc, global_tid, lck );
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( on Intel platforms only )
} else if( packed_reduction_method == atomic_reduce_block ) {
// neither master nor other workers should get here
// (code gen does not generate this call in case 2: atomic reduce block)
// actually it would be better to remove this else-if entirely;
// after removal this value would be checked by the 'else' branch and would assert
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
// only master gets here
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_reduce, loc );
KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );
return;
}
/* 2.a.ii. Reduce Block with a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed
A blocking reduce that includes an implicit barrier.
*/
kmp_int32
__kmpc_reduce(
ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck )
{
KMP_COUNT_BLOCK(REDUCE_wait);
int retval;
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );
// why do we need this initialization here at all?
// Reduction clause cannot be a stand-alone directive.
// do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
// possible detection of false-positive race by the threadchecker ???
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
// check correctness of reduce block nesting
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL );
packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
__KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );
if( packed_reduction_method == critical_reduce_block ) {
__kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
retval = 1;
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
retval = 1;
} else if( packed_reduction_method == atomic_reduce_block ) {
retval = 2;
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
//case tree_reduce_block:
// this barrier should be visible to a customer and to the thread profiler
// (it's a terminating barrier on constructs if NOWAIT not specified)
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames
#endif
retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
retval = ( retval != 0 ) ? ( 0 ) : ( 1 );
// all other workers except master should do this pop here
// ( none of other workers except master will enter __kmpc_end_reduce() )
if ( __kmp_env_consistency_check ) {
if( retval == 0 ) { // 0: all other workers; 1: master
__kmp_pop_sync( global_tid, ct_reduce, loc );
}
}
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );
return retval;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure
Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );
packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );
// this barrier should be visible to a customer and to the thread profiler
// (it's a terminating barrier on constructs if NOWAIT not specified)
if( packed_reduction_method == critical_reduce_block ) {
__kmp_end_critical_section_reduce_block( loc, global_tid, lck );
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( packed_reduction_method == atomic_reduce_block ) {
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
// only master executes here (master releases all other workers)
__kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_reduce, loc );
KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );
return;
}
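// Illustrative sketch (assumed code generation, not part of the original
// file): for
//     #pragma omp parallel for reduction(+:sum)
// the compiler typically emits, per thread, approximately
//     switch (__kmpc_reduce(&loc_struct, gtid, 1, sizeof(sum),
//                           &priv_sum, reduce_func, &crit_name)) {
//         case 1: sum += priv_sum;        // combine inside critical section
//                 __kmpc_end_reduce(&loc_struct, gtid, &crit_name);
//                 break;
//         case 2: /* atomic combine of priv_sum into sum */
//                 break;
//         default: break;                 // worker thread in a tree reduce
//     }
// matching the 1/0/2 return-value contract documented above.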
#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD
/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/
kmp_uint64
__kmpc_get_taskid() {
kmp_int32 gtid;
kmp_info_t * thread;
gtid = __kmp_get_gtid();
if ( gtid < 0 ) {
return 0;
}; // if
thread = __kmp_thread_from_gtid( gtid );
return thread->th.th_current_task->td_task_id;
} // __kmpc_get_taskid
kmp_uint64
__kmpc_get_parent_taskid() {
kmp_int32 gtid;
kmp_info_t * thread;
kmp_taskdata_t * parent_task;
gtid = __kmp_get_gtid();
if ( gtid < 0 ) {
return 0;
}; // if
thread = __kmp_thread_from_gtid( gtid );
parent_task = thread->th.th_current_task->td_parent;
return ( parent_task == NULL ? 0 : parent_task->td_task_id );
} // __kmpc_get_parent_taskid
void __kmpc_place_threads(int nC, int nT, int nO)
{
#if KMP_MIC
if ( ! __kmp_init_serial ) {
__kmp_serial_initialize();
}
__kmp_place_num_cores = nC;
__kmp_place_num_threads_per_core = nT;
__kmp_place_core_offset = nO;
#endif
}
// end of file //
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
// Assumed fallback defaults (added): the original code left these
// uninitialized when fewer command-line arguments were supplied.
Nx = Ny = Nz = 34;
Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
// Start at 0 (not 1) and also seed the second buffer, so the stencil's
// boundary reads never touch uninitialized memory.
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = A[0][i][j][k];
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-3,4)),ceild(24*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(12*t1+Ny+21,16)),floord(24*t2+Ny+20,16)),floord(24*t1-24*t2+Nz+Ny+19,16));t3++) {
for (t4=max(max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32)),ceild(16*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(12*t1+Nx+21,32)),floord(24*t2+Nx+20,32)),floord(16*t3+Nx+12,32)),floord(24*t1-24*t2+Nz+Nx+19,32));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),16*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),16*t3+14),32*t4+30),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
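// Note (added commentary, not part of the generated code): the nest above is a
// PLUTO/CLooG time-tiled form of the 7-point stencil. t1..t4 enumerate
// time-space tiles (sizes 24/24/16/32, matching tile_size[] above), t5 is the
// time step, and t6..t8 are time-shifted spatial coordinates. The
// "#pragma omp parallel for" runs the independent t2 tiles of each t1
// wavefront concurrently, while the innermost t8 loop is marked for
// vectorization.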
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (commented out: freeing here caused performance degradation in timing runs)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
familytree_par.c | #include "familytree.h"
#include<omp.h>
int traverse_tree(tree *node, int numThreads){
int father_iq = 0, mother_iq = 0;
if(node == NULL){
return 0;
}
// The results must be written into this invocation's locals, so the tasks
// need shared(); by default they would get firstprivate copies.
#pragma omp task shared(father_iq)
{
father_iq = traverse_tree(node->father, numThreads);
}
#pragma omp task shared(mother_iq)
{
mother_iq = traverse_tree(node->mother, numThreads);
}
// Wait for both child tasks before combining their results.
#pragma omp taskwait
node->IQ = compute_IQ(node->data, father_iq, mother_iq);
genius[node->id] = node->IQ;
return node->IQ;
}
int traverse(tree *node, int numThreads){
// TODO implement your solution in here.
#pragma omp parallel num_threads(numThreads)
{
#pragma omp single
{
traverse_tree(node,numThreads);
}
}
return node->IQ;
}
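// Note (added commentary): the single region ensures exactly one thread seeds
// the recursion; the tasks it spawns are executed by the whole team, and the
// taskwait in traverse_tree() guarantees each node's IQ is computed only after
// both subtree results are available.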
|
GB_binop__min_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__min_uint16
// A.*B function (eWiseMult): GB_AemultB__min_uint16
// A*D function (colscale): GB_AxD__min_uint16
// D*A function (rowscale): GB_DxB__min_uint16
// C+=B function (dense accum): GB_Cdense_accumB__min_uint16
// C+=b function (dense accum): GB_Cdense_accumb__min_uint16
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_uint16
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_uint16
// C=scalar+B GB_bind1st__min_uint16
// C=scalar+B' GB_bind1st_tran__min_uint16
// C=A+scalar GB_bind2nd__min_uint16
// C=A'+scalar GB_bind2nd_tran__min_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_IMIN (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_UINT16 || GxB_NO_MIN_UINT16)
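// Illustrative note (not part of the generated file): with the definitions
// above, a use such as
//     GB_BINOP (GB_CX (p), aij, bij) ;
// expands to
//     Cx [p] = GB_IMIN (aij, bij) ;
// i.e., the minimum of the two uint16_t operands (GB_IMIN is GraphBLAS's
// integer-min macro).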
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__min_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__min_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__min_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__min_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__min_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__min_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__min_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__min_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
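// Illustrative note (not in the generated file): with op = MIN and its first
// argument bound to the scalar x, the loop below computes
// Cx [p] = min (x, Bx [p]) for all p.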
GrB_Info GB_bind1st__min_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t bij = Bx [p] ;
Cx [p] = GB_IMIN (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__min_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
Cx [p] = GB_IMIN (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
GrB_Info GB_bind1st_tran__min_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__min_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/color-private.h"
#include "magick/cache.h"
#include "magick/draw.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/resample.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/transform.h"
#include "magick/signature-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/option.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather than direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
#if FILTER_LUT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
struct _ResampleFilter
{
CacheView
*view;
Image
*image;
ExceptionInfo
*exception;
MagickBooleanType
debug;
/* Information about image being resampled */
ssize_t
image_area;
InterpolatePixelMethod
interpolate;
VirtualPixelMethod
virtual_pixel;
FilterTypes
filter;
/* processing settings needed */
MagickBooleanType
limit_reached,
do_interpolate,
average_defined;
MagickPixelPacket
average_pixel;
/* current elliptical area being resampled around center point */
double
A, B, C,
Vlimit, Ulimit, Uwidth, slope;
#if FILTER_LUT
/* LUT of weights for filtered average in elliptical area */
double
filter_lut[WLUT_WIDTH];
#else
/* Use a Direct call to the filter functions */
ResizeFilter
*filter_def;
double
F;
#endif
/* the practical working support of the filter */
double
support;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resample needs to do a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on an Elliptical Weighted Average, where the pixels
% found in a large elliptical area are averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping", a master's thesis by Paul S. Heckbert, June 17,
% 1989. Available for free from http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual resample operations.
%
% This function will make the appropriate AcquireVirtualCacheView() calls
% to view the image; calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter, 1.0);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
ExceptionInfo *exception)
{
register ResampleFilter
*resample_filter;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
resample_filter=(ResampleFilter *) AcquireMagickMemory(
sizeof(*resample_filter));
if (resample_filter == (ResampleFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(resample_filter,0,sizeof(*resample_filter));
resample_filter->exception=exception;
resample_filter->image=ReferenceImage((Image *) image);
resample_filter->view=AcquireVirtualCacheView(resample_filter->image,exception);
resample_filter->debug=IsEventLogging();
resample_filter->signature=MagickCoreSignature;
resample_filter->image_area=(ssize_t) (image->columns*image->rows);
resample_filter->average_defined = MagickFalse;
/* initialise the resampling filter settings */
SetResampleFilter(resample_filter, image->filter, image->blur);
(void) SetResampleFilterInterpolateMethod(resample_filter,
image->interpolate);
(void) SetResampleFilterVirtualPixelMethod(resample_filter,
GetImageVirtualPixelMethod(image));
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resample filter as
% returned by AcquireResampleFilter(), freeing any memory or other
% information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
ResampleFilter *resample_filter)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->view=DestroyCacheView(resample_filter->view);
resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
resample_filter->signature=(~MagickCoreSignature);
resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficient manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,MagickPixelPacket *pixel)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample,
% the distortion-transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
ResampleFilter *resample_filter,const double u0,const double v0,
MagickPixelPacket *pixel)
{
MagickBooleanType
status;
ssize_t u,v, v1, v2, uw, hit;
double u1;
double U,V,Q,DQ,DDQ;
double divisor_c,divisor_m;
register double weight;
register const PixelPacket *pixels;
register const IndexPacket *indexes;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
status=MagickTrue;
/* GetMagickPixelPacket(resample_filter->image,pixel); */
if ( resample_filter->do_interpolate ) {
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
return(status);
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif
/*
Does the resample area miss the image proper?
If so, and that area is a simple solid color, then simply return that color!
This saves a lot of calculation when resampling outside the bounds of
the source image.
However it probably should be expanded to the image bounds plus the
filter's scaled support size.
*/
hit = 0;
switch ( resample_filter->virtual_pixel ) {
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case MaskVirtualPixelMethod:
if ( resample_filter->limit_reached
|| u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
|| v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++;
break;
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 + resample_filter->Ulimit < 0.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
)
hit++;
break;
case HorizontalTileVirtualPixelMethod:
if ( v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++; /* outside the horizontally tiled images. */
break;
case VerticalTileVirtualPixelMethod:
if ( u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
)
hit++; /* outside the vertically tiled images. */
break;
case DitherVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 + resample_filter->Ulimit < -32.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
)
hit++;
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
/* resampling of area is always needed - no VP limits */
break;
}
if ( hit ) {
/* The area being resampled is simply a solid color;
* just return a single lookup color.
*
* Should this return the user's requested interpolated color?
*/
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
return(status);
}
/*
When scaling limits are reached, return an 'averaged' result.
*/
if ( resample_filter->limit_reached ) {
switch ( resample_filter->virtual_pixel ) {
/* This is always handled by the above, so no need.
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case GrayVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case MaskVirtualPixelMethod:
*/
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
case DitherVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
/* We need an average edge pixel, from the correct edge!
How should I calculate an average edge color?
Just returning an averaged neighbourhood
works well in general, but falls down for TileEdge methods.
This needs to be done properly!
*/
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
break;
case HorizontalTileVirtualPixelMethod:
case VerticalTileVirtualPixelMethod:
/* just return the background pixel - Is there a better way? */
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
resample_filter->exception);
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
default:
/* generate an average color of the WHOLE image */
if ( resample_filter->average_defined == MagickFalse ) {
Image
*average_image;
CacheView
*average_view;
GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
&resample_filter->average_pixel);
resample_filter->average_defined=MagickTrue;
/* Try to get an averaged pixel color of whole image */
average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
resample_filter->exception);
if (average_image == (Image *) NULL)
{
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
average_view=AcquireVirtualCacheView(average_image,
&average_image->exception);
pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
resample_filter->exception);
if (pixels == (const PixelPacket *) NULL) {
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
SetMagickPixelPacket(resample_filter->image,pixels,indexes,
&(resample_filter->average_pixel));
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
{
/* CheckerTile is an alpha blend of the image's average pixel
color and the current background color */
/* image's average pixel color */
weight = QuantumScale*((MagickRealType)(QuantumRange-
resample_filter->average_pixel.opacity));
resample_filter->average_pixel.red *= weight;
resample_filter->average_pixel.green *= weight;
resample_filter->average_pixel.blue *= weight;
divisor_c = weight;
/* background color */
weight = QuantumScale*((MagickRealType)(QuantumRange-
resample_filter->image->background_color.opacity));
resample_filter->average_pixel.red +=
weight*resample_filter->image->background_color.red;
resample_filter->average_pixel.green +=
weight*resample_filter->image->background_color.green;
resample_filter->average_pixel.blue +=
weight*resample_filter->image->background_color.blue;
resample_filter->average_pixel.opacity +=
resample_filter->image->background_color.opacity;
divisor_c += weight;
/* alpha blend */
resample_filter->average_pixel.red /= divisor_c;
resample_filter->average_pixel.green /= divisor_c;
resample_filter->average_pixel.blue /= divisor_c;
resample_filter->average_pixel.opacity /= 2; /* 50% blend */
}
}
*pixel=resample_filter->average_pixel;
break;
}
return(status);
}
/*
Initialize weighted average data collection
*/
hit = 0;
divisor_c = 0.0;
divisor_m = 0.0;
pixel->red = pixel->green = pixel->blue = 0.0;
if (pixel->matte != MagickFalse) pixel->opacity = 0.0;
if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0;
/*
Determine the parallelogram bounding box fitted to the ellipse
centered at u0,v0. This area is bounded by the lines...
*/
v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */
v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);
/* scan line start and width across the parallelogram */
u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
(void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif
/*
Do weighted resampling of all pixels, within the scaled ellipse,
bounded by a parallelogram fitted to the ellipse.
*/
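/*
Note on the incremental evaluation below: the ellipse quotient
Q(U,V) = A*U*U + B*U*V + C*V*V is advanced along each scan line by
forward differencing. Stepping U by one pixel gives
Q(U+1,V) - Q(U,V) = A*(2*U+1) + B*V (the initial DQ)
and that difference itself grows by the constant DDQ = 2*A, so each
pixel costs two additions instead of a full quadratic evaluation.
*/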
DDQ = 2*resample_filter->A;
for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
long uu = ceil(u1); /* actual pixel location (for debug only) */
(void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
u = (ssize_t)ceil(u1); /* first pixel in scanline */
u1 += resample_filter->slope; /* start of next scan line */
/* location of this first pixel, relative to u0,v0 */
U = (double)u-u0;
V = (double)v-v0;
/* Q = ellipse quotient ( if Q<F then pixel is inside ellipse) */
Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;
/* get the scanline of pixels for this v */
pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
1,resample_filter->exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);
/* count up the weighted pixel colors */
for( u=0; u<uw; u++ ) {
weight = 0;
#if FILTER_LUT
/* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
if ( Q < (double)WLUT_WIDTH ) {
weight = resample_filter->filter_lut[(int)Q];
#else
/* Note that the ellipse has been pre-scaled so F = support^2 */
if ( Q < (double)resample_filter->F ) {
weight = GetResizeFilterWeight(resample_filter->filter_def,
sqrt(Q)); /* a SquareRoot! Arrggghhhhh... */
#endif
if (pixel->matte != MagickFalse)
pixel->opacity += weight*pixels->opacity;
divisor_m += weight;
if (pixel->matte != MagickFalse)
weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity));
pixel->red += weight*pixels->red;
pixel->green += weight*pixels->green;
pixel->blue += weight*pixels->blue;
if (pixel->colorspace == CMYKColorspace)
pixel->index += weight*(*indexes);
divisor_c += weight;
hit++;
#if DEBUG_HIT_MISS
/* mark the pixel according to hit/miss of the ellipse */
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
} else {
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
}
uu++;
#else
}
#endif
pixels++;
indexes++;
Q += DQ;
DQ += DDQ;
}
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif
/*
Result sanity check -- this should NOT happen
*/
if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
/* not enough pixels, or bad weighting in resampling,
so resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
pixel->opacity = pixel->red = pixel->green = pixel->blue = 0;
pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
status=InterpolateMagickPixelPacket(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
#endif
return status;
}
/*
Finalize results of resampling
*/
divisor_m = 1.0/divisor_m;
if (pixel->matte != MagickFalse)
pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity);
divisor_c = 1.0/divisor_c;
pixel->red = (MagickRealType) ClampToQuantum(divisor_c*pixel->red);
pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green);
pixel->blue = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue);
if (pixel->colorspace == CMYKColorspace)
pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index);
return(MagickTrue);
}
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The ClampUpAxes() function converts the input vectors into major and
% minor axis unit vectors, and their magnitudes. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
* DETAILS: Find the singular values and (unit) left singular
* vectors of Jinv, clamping up the singular values to 1, and
* multiply the unit left singular vectors by the new singular
* values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
* Thyssen and funding from the National Science and Engineering
* Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
* The idea of clamping up the EWA ellipse's major and minor axes so
* that the result contains the reconstruction kernel filter support
* is taken from Andreas Gustafsson's Masters thesis "Interactive
* Image Warping", Helsinki University of Technology, Faculty of
* Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
/*
* In exact arithmetic, discriminant can't be negative. In floating
* point, it can, because of the bad conditioning of SVD
* decompositions done through the associated normal matrix.
*/
const double sqrt_discriminant =
sqrt(discriminant > 0.0 ? discriminant : 0.0);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
* s2 is the smallest singular value of the inverse Jacobian
* matrix. Its reciprocal is the largest singular value of the
* Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
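/*
 * Worked example (illustrative, not in the original source): for a pure
 * scaling Jinv = [ 2, 0 ; 0, 0.5 ] (dux=2, dvx=0, duy=0, dvy=0.5), the
 * singular values are 2 and 0.5, so ClampUpAxes() returns major_mag = 2
 * along [1,0] and clamps the minor axis up to minor_mag = 1 along [0,1]:
 * the resampling ellipse is never thinner than the unit disk.
 */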
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors. This is not an
% orthogonal scaling, but two distorted scaling vectors, allowing the
% generation of an angled ellipse.
%
% As only two derivative scaling vectors are used, the center of the ellipse
% must be the center of the lookup. That is, any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivatives of the
% distortion function, or the partial derivatives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example you could use dr,da/r
% polar coordinate scaling vectors.
%
% If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
% then the scaling vectors are determined from the derivatives...
% du/dx, dv/dx and du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
% dv/dx = 0 and du/dy = 0
% producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that the scaling vectors do not follow the argument order. The
% argument order is the general order in which the derivatives are extracted
% from the distortion equations, not the scaling-vector order. As such the
% middle two values may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling information defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The derivatives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order derivatives
% are usually determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
const double dux,const double duy,const double dvx,const double dvy)
{
double A,B,C,F;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
resample_filter->limit_reached = MagickFalse;
/* A 'point' filter forces use of interpolation instead of area sampling */
if ( resample_filter->filter == PointFilter )
return; /* EWA turned off - nothing to do */
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "# -----\n" );
(void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
dux, dvx, duy, dvy);
#endif
/* Find Ellipse Coefficients such that
A*u^2 + B*u*v + C*v^2 = F
With u,v relative to point around which we are resampling.
And the given scaling dx,dy vectors in u,v space
du/dx,dv/dx and du/dy,dv/dy
*/
#if EWA
/* Direct conversion of derivatives into elliptical coefficients
However when magnifying images, the scaling vectors will be small
resulting in an ellipse that is too small to sample properly.
As such we need to clamp the major/minor axes to a minimum of 1.0
to prevent it getting too small.
*/
#if EWA_CLAMP
{ double major_mag,
minor_mag,
major_x,
major_y,
minor_x,
minor_y;
ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
&major_x, &major_y, &minor_x, &minor_y);
major_x *= major_mag; major_y *= major_mag;
minor_x *= minor_mag; minor_y *= minor_mag;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
major_x, major_y, minor_x, minor_y);
#endif
A = major_y*major_y+minor_y*minor_y;
B = -2.0*(major_x*major_y+minor_x*minor_y);
C = major_x*major_x+minor_x*minor_x;
F = major_mag*minor_mag;
F *= F; /* square it */
}
#else /* raw unclamped EWA */
A = dvx*dvx+dvy*dvy;
B = -2.0*(dux*dvx+duy*dvy);
C = dux*dux+duy*duy;
F = dux*dvy-duy*dvx;
F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
/*
This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his
thesis, which adds a unit circle to the elliptical area so as to do both
Reconstruction and Prefiltering of the pixels in the resampling. It also
means it is always likely to have at least 4 pixels within the area of the
ellipse, for weighted averaging. No scaling will result with F == 4.0 and
a circle of radius 2.0, and F smaller than this means magnification is
being used.
NOTE: This method produces a very blurry result at near-unity scale while
producing perfect results for strong minification and magnification.
However filter support is fixed to 2.0 (no good for windowed Sinc filters).
*/
A = dvx*dvx+dvy*dvy+1;
B = -2.0*(dux*dvx+duy*dvy);
C = dux*dux+duy*duy+1;
F = A*C - B*B/4;
#endif
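/* Sanity check (illustrative, not in the original source): for the unit
vectors dux=1,dvx=0,duy=0,dvy=1 used by SetResampleFilter(), the
clamped EWA path yields A=1, B=0, C=1, F=1 -- the unit circle -- which
the support scaling below then grows to a circle of radius 'support'.
*/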
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);
/* Figure out the various information directly about the ellipse.
This information is not needed at this time, but may be
needed later for better limit determination.
It is also good to have as a record for future debugging
*/
{ double alpha, beta, gamma, Major, Minor;
double Eccentricity, Ellipse_Area, Ellipse_Angle;
alpha = A+C;
beta = A-C;
gamma = sqrt(beta*beta + B*B );
if ( alpha - gamma <= MagickEpsilon )
Major= MagickMaximumValue;
else
Major= sqrt(2*F/(alpha - gamma));
Minor = sqrt(2*F/(alpha + gamma));
(void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );
/* other information about ellipse include... */
Eccentricity = Major/Minor;
Ellipse_Area = MagickPI*Major*Minor;
Ellipse_Angle = atan2(B, A-C);
(void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
(double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
}
#endif
/* If one or both of the scaling vectors is impossibly large
(producing a very large raw F value), we may as well not bother
doing any form of resampling since the resampled area is very large.
In this case some alternative means of pixel sampling, such as
the average of the whole image is needed to get a reasonable
result. Calculate only as needed.
*/
if ( (4*A*C - B*B) > MagickMaximumValue ) {
resample_filter->limit_reached = MagickTrue;
return;
}
/* Scale ellipse to match the filter's support
(that is, multiply F by the square of the support)
Simpler to just multiply it by the support twice!
*/
F *= resample_filter->support;
F *= resample_filter->support;
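/* Derivation note (illustrative): for the ellipse A*u^2 + B*u*v + C*v^2 = F,
the extreme u occurs where the partial derivative with respect to v
vanishes (B*u + 2*C*v = 0), giving u*u = C*F/(A*C - B*B/4), and
symmetrically for v. Along a scan line (fixed v) the chord is centered
at u = -B*v/(2*A), hence 'slope', with maximum half-width sqrt(F/A) at
v = 0, hence 'Uwidth'. */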
/* Orthogonal bounds of the ellipse */
resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));
/* Horizontally aligned parallelogram fitted to Ellipse */
resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
resample_filter->Ulimit, resample_filter->Vlimit,
resample_filter->Uwidth, resample_filter->slope );
#endif
/* Check the absolute area of the parallelogram involved.
* This limit needs more work, as it is too slow for larger images
* with tiled views of the horizon.
*/
if ( (resample_filter->Uwidth * resample_filter->Vlimit)
> (4.0*resample_filter->image_area)) {
resample_filter->limit_reached = MagickTrue;
return;
}
/* Scale ellipse formula to directly index the Filter Lookup Table */
{ register double scale;
#if FILTER_LUT
/* scale so that F = WLUT_WIDTH; -- hardcoded */
scale = (double)WLUT_WIDTH/F;
#else
/* scale so that F = resample_filter->F (support^2) */
scale = resample_filter->F/F;
#endif
resample_filter->A = A*scale;
resample_filter->B = B*scale;
resample_filter->C = C*scale;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() sets the resampling filter lookup table based on a
% specific filter. Note that the filter is used as a radial filter, not as a
% two-pass orthogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterTypes filter,const double blur)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
% o filter: the resize filter for elliptical weighting LUT
%
% o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
const FilterTypes filter,const double blur)
{
ResizeFilter
*resize_filter;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
resample_filter->do_interpolate = MagickFalse;
resample_filter->filter = filter;
/* Default cylindrical filter is a Cubic Keys filter */
if ( filter == UndefinedFilter )
resample_filter->filter = RobidouxFilter;
if ( resample_filter->filter == PointFilter ) {
resample_filter->do_interpolate = MagickTrue;
return; /* EWA turned off - nothing more to do */
}
resize_filter = AcquireResizeFilter(resample_filter->image,
resample_filter->filter,blur,MagickTrue,resample_filter->exception);
if (resize_filter == (ResizeFilter *) NULL) {
(void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
ModuleError, "UnableToSetFilteringValue",
"Fall back to Interpolated 'Point' filter");
resample_filter->filter = PointFilter;
resample_filter->do_interpolate = MagickTrue;
return; /* EWA turned off - nothing more to do */
}
/* Get the practical working support for the filter,
* after any API call blur factors have been accounted for.
*/
#if EWA
resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif
#if FILTER_LUT
/* Fill the LUT with the weights from the selected filter function */
{ register int
Q;
double
r_scale;
/* Scale radius so the filter LUT covers the full support range */
r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
for(Q=0; Q<WLUT_WIDTH; Q++)
resample_filter->filter_lut[Q] = (double)
GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);
/* finished with the resize filter */
resize_filter = DestroyResizeFilter(resize_filter);
}
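/* Note (illustrative): entry Q of the LUT holds the filter weight at
radius support*sqrt(Q/WLUT_WIDTH), so lookups are indexed by squared
radius, with Q = WLUT_WIDTH landing exactly on the filter's support
edge. */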
#else
/* save the filter and the scaled ellipse bounds needed for filter */
resample_filter->filter_def = resize_filter;
resample_filter->F = resample_filter->support*resample_filter->support;
#endif
/*
Adjust the scaling of the default unit circle
This assumes that any real scaling changes will always
take place AFTER the filter method has been initialized.
*/
ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);
#if 0
/*
This is old code kept as a reference only. Basically it generates
a Gaussian bell curve, with sigma = 0.5 if the support is 2.0.
Create Normal Gaussian 2D Filter Weighted Lookup Table.
A normal EWA gaussian lookup would use exp(Q*ALPHA)
where Q = distance squared from 0.0 (center) to 1.0 (edge)
and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767
The table is of length 1024, and equates to support radius of 2.0
thus needs to be scaled by ALPHA*4/1024 and any blur factor squared
It comes from reference code provided by Fred Weinhaus.
*/
r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
for(Q=0; Q<WLUT_WIDTH; Q++)
resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
resample_filter->support = WLUT_WIDTH;
#endif
#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp single
#endif
{
if (IsMagickTrue(GetImageArtifact(resample_filter->image,
"resample:verbose")) )
{
register int
Q;
double
r_scale;
/* Debug output of the filter weighting LUT
Gnuplot the LUT data, the x scale index has been adjusted
plot [0:2][-.2:1] "lut.dat" with lines
The filter values should be normalized for comparison
*/
printf("#\n");
printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
resample_filter->filter) );
printf("#\n");
printf("# Note: values in table are using a squared radius lookup.\n");
printf("# As such its distribution is not uniform.\n");
printf("#\n");
printf("# The X value is the support distance for the Y weight\n");
printf("# so you can use gnuplot to plot this cylindrical filter\n");
printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
printf("#\n");
/* Scale radius so the filter LUT covers the full support range */
r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
for(Q=0; Q<WLUT_WIDTH; Q++)
printf("%8.*g %.*g\n",
GetMagickPrecision(),sqrt((double)Q)*r_scale,
GetMagickPrecision(),resample_filter->filter_lut[Q] );
printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
}
/* Output the above once only for each image, and each setting
(void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
*/
}
#endif /* FILTER_LUT */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const InterpolatePixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->interpolate=method;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->virtual_pixel=method;
if (method != UndefinedVirtualPixelMethod)
(void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
return(MagickTrue);
}
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/threshold.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*blur_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*blur_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (fabs(sigma) < MagickEpsilon)
return(blur_image);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, blur, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
(size_t) (width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
kernel[i][(k-1)/2]+=(double) (1.0-normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
const Quantum
*magick_restrict r;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
const Quantum
*magick_restrict p;
ssize_t
i;
ssize_t
center,
j;
j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5));
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
const double
*magick_restrict k;
const Quantum
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(blur_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
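/*
  A minimal usage sketch, not part of the original source: read an image,
  adaptively blur it, and write the result.  The file names and the sigma of
  1.5 are illustrative assumptions; a radius of 0.0 lets AdaptiveBlurImage()
  choose the kernel width:

    #include <MagickCore/MagickCore.h>

    int main(int argc,char **argv)
    {
      ExceptionInfo
        *exception;

      Image
        *blur_image,
        *image;

      ImageInfo
        *image_info;

      MagickCoreGenesis(argv[0],MagickFalse);
      exception=AcquireExceptionInfo();
      image_info=AcquireImageInfo();
      (void) CopyMagickString(image_info->filename,"input.png",
        MagickPathExtent);
      image=ReadImage(image_info,exception);
      if (image != (Image *) NULL)
        {
          blur_image=AdaptiveBlurImage(image,0.0,1.5,exception);
          if (blur_image != (Image *) NULL)
            {
              (void) CopyMagickString(blur_image->filename,"output.png",
                MagickPathExtent);
              (void) WriteImage(image_info,blur_image,exception);
              blur_image=DestroyImage(blur_image);
            }
          image=DestroyImage(image);
        }
      image_info=DestroyImageInfo(image_info);
      exception=DestroyExceptionInfo(exception);
      MagickCoreTerminus();
      return(0);
    }
*/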
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*sharp_view,
*edge_view,
*image_view;
double
normalize,
**kernel;
Image
*sharp_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sharp_image=CloneImage(image,0,0,MagickTrue,exception);
if (sharp_image == (Image *) NULL)
return((Image *) NULL);
if (fabs(sigma) < MagickEpsilon)
return(sharp_image);
if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, blur, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image,exception);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image,exception);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
*/
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
(width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively sharpen image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
for (y=0; y < (ssize_t) sharp_image->rows; y++)
{
const Quantum
*magick_restrict r;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
exception);
if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) sharp_image->columns; x++)
{
const Quantum
*magick_restrict p;
ssize_t
i;
ssize_t
center,
j;
j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5));
if (j < 0)
j=0;
else
if (j > (ssize_t) width)
j=(ssize_t) width;
if ((j & 0x01) != 0)
j--;
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
(ssize_t) ((width-j)/2L),width-j,width-j,exception);
if (p == (const Quantum *) NULL)
break;
center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
GetPixelChannels(image)*((width-j)/2);
for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
sharp_traits,
traits;
const double
*magick_restrict k;
const Quantum
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
sharp_traits=GetPixelChannelTraits(sharp_image,channel);
if ((traits == UndefinedPixelTrait) ||
(sharp_traits == UndefinedPixelTrait))
continue;
if ((sharp_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(sharp_image,channel,p[center+i],q);
continue;
}
k=kernel[j];
pixels=p;
pixel=0.0;
gamma=0.0;
if ((sharp_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
/*
Alpha blending.
*/
for (v=0; v < (ssize_t) (width-j); v++)
{
for (u=0; u < (ssize_t) (width-j); u++)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
k++;
pixels+=GetPixelChannels(image);
}
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(sharp_image);
r+=GetPixelChannels(edge_image);
}
if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sharp_image->type=image->type;
sharp_view=DestroyCacheView(sharp_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
sharp_image=DestroyImage(sharp_image);
return(sharp_image);
}
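/*
  Usage mirrors the AdaptiveBlurImage() sketch above; only the call changes
  (sigma 1.5 is again an illustrative assumption):

    sharp_image=AdaptiveSharpenImage(image,0.0,1.5,exception);

  The kernels built above are negated Gaussians whose center tap is replaced
  by twice the magnitude of the (negative) kernel sum, yielding a net
  positive, sharpening response.
*/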
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
char
geometry[MagickPathExtent];
KernelInfo
*kernel_info;
Image
*blur_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
blur_image=AccelerateBlurImage(image,radius,sigma,exception);
if (blur_image != (Image *) NULL)
return(blur_image);
#endif
(void) FormatLocaleString(geometry,MagickPathExtent,
"blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
kernel_info=AcquireKernelInfo(geometry,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
blur_image=ConvolveImage(image,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(blur_image);
}
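/*
  The geometry string above chains two 1-D "blur" kernels, the second rotated
  90 degrees, so the 2-D Gaussian is applied as two separable passes.  An
  equivalent explicit sketch (radius 0 and sigma 2.0 are assumed values):

    kernel_info=AcquireKernelInfo("blur:0x2;blur:0x2+90",exception);
    blur_image=ConvolveImage(image,kernel_info,exception);
    kernel_info=DestroyKernelInfo(kernel_info);
*/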
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l a t e r a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilateralBlurImage() is a non-linear, edge-preserving, and noise-reducing
% smoothing filter for images. It replaces the intensity of each pixel with
% a weighted average of intensity values from nearby pixels. This weight is
% based on a Gaussian distribution. The weights depend not only on Euclidean
% distance of pixels, but also on the radiometric differences (e.g., range
% differences, such as color intensity, depth distance, etc.). This preserves
% sharp edges.
%
% The format of the BilateralBlurImage method is:
%
% Image *BilateralBlurImage(const Image *image,const size_t width,
% const size_t height,const double intensity_sigma,
% const double spatial_sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the neighborhood in pixels.
%
% o height: the height of the neighborhood in pixels.
%
% o intensity_sigma: sigma in the intensity space. A larger value means
% that farther colors within the pixel neighborhood (see spatial_sigma)
% will be mixed together, resulting in larger areas of semi-equal color.
%
% o spatial_sigma: sigma in the coordinate space. A larger value means that
% farther pixels influence each other as long as their colors are close
% enough (see intensity_sigma). When the neighborhood diameter is greater
% than zero, it specifies the neighborhood size regardless of
% spatial_sigma. Otherwise, the neighborhood diameter is proportional to
% spatial_sigma.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double BlurDistance(const ssize_t x,const ssize_t y,
const ssize_t u,const ssize_t v)
{
return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v)));
}
static inline double BlurGaussian(const double x,const double sigma)
{
return(exp(-((double) x*x)*PerceptibleReciprocal(2.0*sigma*sigma))*
PerceptibleReciprocal(Magick2PI*sigma*sigma));
}
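/*
  BlurGaussian() above evaluates the normalized 2-D Gaussian

    G(x) = exp(-x*x/(2*sigma*sigma))/(2*Pi*sigma*sigma)

  with PerceptibleReciprocal() guarding both divisions against a zero sigma.
*/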
static double **DestroyBilateralThreadSet(const ssize_t number_threads,
double **weights)
{
ssize_t
i;
assert(weights != (double **) NULL);
for (i=0; i <= (ssize_t) number_threads; i++)
if (weights[i] != (double *) NULL)
weights[i]=(double *) RelinquishMagickMemory(weights[i]);
weights=(double **) RelinquishMagickMemory(weights);
return(weights);
}
static double **AcquireBilateralThreadSet(const size_t number_threads,
const size_t width,const size_t height)
{
double
**weights;
ssize_t
i;
weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights));
if (weights == (double **) NULL)
return((double **) NULL);
(void) memset(weights,0,(number_threads+1)*sizeof(*weights));
for (i=0; i <= (ssize_t) number_threads; i++)
{
weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights));
if (weights[i] == (double *) NULL)
return(DestroyBilateralThreadSet(number_threads,weights));
}
return(weights);
}
MagickExport Image *BilateralBlurImage(const Image *image,const size_t width,
const size_t height,const double intensity_sigma,const double spatial_sigma,
ExceptionInfo *exception)
{
#define MaxIntensity (255)
#define BilateralBlurImageTag "Blur/Image"
CacheView
*blur_view,
*image_view;
double
intensity_gaussian[2*(MaxIntensity+1)],
*spatial_gaussian,
**weights;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
mid;
ssize_t
u;
ssize_t
n,
number_threads,
v;
ssize_t
i,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
weights=AcquireBilateralThreadSet(number_threads,width,height);
if (weights == (double **) NULL)
{
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i=(-MaxIntensity); i <= MaxIntensity; i++)
intensity_gaussian[i+MaxIntensity]=BlurGaussian((double) i,intensity_sigma);
spatial_gaussian=weights[number_threads];
n=0;
mid.x=(ssize_t) (width/2L);
mid.y=(ssize_t) (height/2L);
for (v=0; v < (ssize_t) height; v++)
for (u=0; u < (ssize_t) width; u++)
spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y),
spatial_sigma);
/*
Bilateral blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
double
gamma,
pixel;
const Quantum
*magick_restrict p,
*magick_restrict r;
ssize_t
i,
u;
ssize_t
n,
v;
/*
Tonal weighting preserves edges while smoothing in the flat regions.
*/
p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,width,height,
exception);
if (p == (const Quantum *) NULL)
break;
p+=(ssize_t) GetPixelChannels(image)*width*mid.y+GetPixelChannels(image)*
mid.x;
n=0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
double
intensity;
r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+
GetPixelChannels(image)*(mid.x-u);
intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))-
(double) ScaleQuantumToChar(GetPixelIntensity(image,p));
if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity))
weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]*
spatial_gaussian[n];
else
weights[id][n]=BlurGaussian(intensity,intensity_sigma)*
BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma);
n++;
}
}
for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
{
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[i],q);
continue;
}
pixel=0.0;
gamma=0.0;
n=0;
if ((blur_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+
GetPixelChannels(image)*(mid.x-u);
pixel+=weights[id][n]*r[i];
gamma+=weights[id][n];
n++;
}
}
SetPixelChannel(blur_image,channel,ClampToQuantum(
PerceptibleReciprocal(gamma)*pixel),q);
continue;
}
/*
Alpha blending.
*/
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
double
alpha,
beta;
r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+
GetPixelChannels(image)*(mid.x-u);
alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
beta=(double) (QuantumScale*GetPixelAlpha(image,r));
pixel+=weights[id][n]*r[i];
gamma+=weights[id][n]*alpha*beta;
n++;
}
}
SetPixelChannel(blur_image,channel,ClampToQuantum(
PerceptibleReciprocal(gamma)*pixel),q);
}
q+=GetPixelChannels(blur_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BilateralBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
image_view=DestroyCacheView(image_view);
weights=DestroyBilateralThreadSet(number_threads,weights);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
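/*
  A usage sketch (the 5x5 window and the two sigmas are illustrative
  assumptions); a larger intensity_sigma mixes more distant colors, a larger
  spatial_sigma mixes more distant pixels:

    blur_image=BilateralBlurImage(image,5,5,12.0,16.0,exception);
*/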
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
const KernelInfo *kernel_info,ExceptionInfo *exception)
{
Image
*convolve_image;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
if (convolve_image != (Image *) NULL)
return(convolve_image);
#endif
convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
exception);
return(convolve_image);
}
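/*
  A usage sketch: apply a user-defined 3x3 sharpening kernel given as a
  kernel geometry string (the tap values are an illustrative choice):

    kernel_info=AcquireKernelInfo("3x3: 0,-1,0,-1,5,-1,0,-1,0",exception);
    if (kernel_info != (KernelInfo *) NULL)
      {
        convolve_image=ConvolveImage(image,kernel_info,exception);
        kernel_info=DestroyKernelInfo(kernel_info);
      }
*/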
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
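/*
  Hull() below performs one pass of the complementary hulling: each pixel is
  compared against the neighbor at (x_offset,y_offset) and nudged by one
  quantum level toward it when their difference exceeds a two-level
  threshold, raising the hull when polarity > 0 and lowering it otherwise.
  The f and g buffers are (columns+2)*(rows+2) scanline buffers with a
  one-pixel border.
*/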
static void Hull(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,const size_t columns,const size_t rows,
const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
Quantum
*p,
*q,
*r,
*s;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(f != (Quantum *) NULL);
assert(g != (Quantum *) NULL);
p=f+(columns+2);
q=g+(columns+2);
r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickRealType
v;
ssize_t
i,
x;
i=(2*y+1)+y*columns;
if (polarity > 0)
for (x=0; x < (ssize_t) columns; x++)
{
v=(MagickRealType) p[i];
if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
v+=ScaleCharToQuantum(1);
q[i]=(Quantum) v;
i++;
}
else
for (x=0; x < (ssize_t) columns; x++)
{
v=(MagickRealType) p[i];
if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
v-=ScaleCharToQuantum(1);
q[i]=(Quantum) v;
i++;
}
}
p=f+(columns+2);
q=g+(columns+2);
r=q+(y_offset*((ssize_t) columns+2)+x_offset);
s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
ssize_t
i,
x;
MagickRealType
v;
i=(2*y+1)+y*columns;
if (polarity > 0)
for (x=0; x < (ssize_t) columns; x++)
{
v=(MagickRealType) q[i];
if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
((MagickRealType) r[i] > v))
v+=ScaleCharToQuantum(1);
p[i]=(Quantum) v;
i++;
}
else
for (x=0; x < (ssize_t) columns; x++)
{
v=(MagickRealType) q[i];
if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
((MagickRealType) r[i] < v))
v-=ScaleCharToQuantum(1);
p[i]=(Quantum) v;
i++;
}
}
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"
CacheView
*despeckle_view,
*image_view;
Image
*despeckle_image;
MagickBooleanType
status;
MemoryInfo
*buffer_info,
*pixel_info;
Quantum
*magick_restrict buffer,
*magick_restrict pixels;
ssize_t
i;
size_t
length;
static const ssize_t
X[4] = {0, 1, 1,-1},
Y[4] = {1, 0, 1, 1};
/*
Allocate despeckled image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
despeckle_image=AccelerateDespeckleImage(image,exception);
if (despeckle_image != (Image *) NULL)
return(despeckle_image);
#endif
despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
if (despeckle_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(despeckle_image,DirectClass,exception);
if (status == MagickFalse)
{
despeckle_image=DestroyImage(despeckle_image);
return((Image *) NULL);
}
/*
Allocate image buffer.
*/
length=(size_t) ((image->columns+2)*(image->rows+2));
pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
if ((pixel_info == (MemoryInfo *) NULL) ||
(buffer_info == (MemoryInfo *) NULL))
{
if (buffer_info != (MemoryInfo *) NULL)
buffer_info=RelinquishVirtualMemory(buffer_info);
if (pixel_info != (MemoryInfo *) NULL)
pixel_info=RelinquishVirtualMemory(pixel_info);
despeckle_image=DestroyImage(despeckle_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
/*
Reduce speckle in the image.
*/
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
despeckle_traits,
traits;
ssize_t
k,
x;
ssize_t
j,
y;
if (status == MagickFalse)
continue;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
if ((traits == UndefinedPixelTrait) ||
(despeckle_traits == UndefinedPixelTrait))
continue;
if ((despeckle_traits & CopyPixelTrait) != 0)
continue;
(void) memset(pixels,0,length*sizeof(*pixels));
j=(ssize_t) image->columns+2;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
j++;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixels[j++]=p[i];
p+=GetPixelChannels(image);
}
j++;
}
(void) memset(buffer,0,length*sizeof(*buffer));
for (k=0; k < 4; k++)
{
Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
}
j=(ssize_t) image->columns+2;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
j++;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelChannel(despeckle_image,channel,pixels[j++],q);
q+=GetPixelChannels(despeckle_image);
}
sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
j++;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
despeckle_view=DestroyCacheView(despeckle_view);
image_view=DestroyCacheView(image_view);
buffer_info=RelinquishVirtualMemory(buffer_info);
pixel_info=RelinquishVirtualMemory(pixel_info);
despeckle_image->type=image->type;
if (status == MagickFalse)
despeckle_image=DestroyImage(despeckle_image);
return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
ExceptionInfo *exception)
{
Image
*edge_image;
KernelInfo
*kernel_info;
ssize_t
i;
size_t
width;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth1D(radius,0.5);
kernel_info=AcquireKernelInfo((const char *) NULL,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(kernel_info,0,sizeof(*kernel_info));
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
kernel_info->signature=MagickCoreSignature;
kernel_info->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel_info->width,kernel_info->height*
sizeof(*kernel_info->values)));
if (kernel_info->values == (MagickRealType *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
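/*
  Build a high-pass kernel: every tap is -1.0 and the center tap is set to
  width*height-1, so the taps sum to zero and the kernel responds only to
  local intensity changes.
*/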
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]=(-1.0);
kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
edge_image=ConvolveImage(image,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and EmbossImage() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
double
gamma,
normalize;
Image
*emboss_image;
KernelInfo
*kernel_info;
ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth1D(radius,sigma);
kernel_info=AcquireKernelInfo((const char *) NULL,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (width-1)/2;
kernel_info->y=(ssize_t) (width-1)/2;
kernel_info->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel_info->width,kernel_info->width*
sizeof(*kernel_info->values)));
if (kernel_info->values == (MagickRealType *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
j=(ssize_t) (kernel_info->width-1)/2;
k=j;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
(2.0*MagickPI*MagickSigma*MagickSigma));
if (u != k)
kernel_info->values[i]=0.0;
i++;
}
k--;
}
normalize=0.0;
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
normalize+=kernel_info->values[i];
gamma=PerceptibleReciprocal(normalize);
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]*=gamma;
emboss_image=ConvolveImage(image,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
if (emboss_image != (Image *) NULL)
(void) EqualizeImage(emboss_image,exception);
return(emboss_image);
}
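/*
  A usage sketch (radius 0.0 lets EmbossImage() pick the kernel width; sigma
  1.0 is an illustrative assumption):

    emboss_image=EmbossImage(image,0.0,1.0,exception);
*/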
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
% The format of the GaussianBlurImage method is:
%
% Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
char
geometry[MagickPathExtent];
KernelInfo
*kernel_info;
Image
*blur_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
(void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
radius,sigma);
kernel_info=AcquireKernelInfo(geometry,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
blur_image=ConvolveImage(image,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(blur_image);
}
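/*
  Unlike BlurImage() above, which chains two separable 1-D passes, the
  "gaussian:" kernel here is a single full 2-D convolution, which is more
  costly for large sigma.  A usage sketch (sigma 2.0 is an illustrative
  assumption):

    blur_image=GaussianBlurImage(image,0.0,2.0,exception);
*/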
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
const double *magick_restrict pixel)
{
return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); /* Rec709 */
}
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"
CacheView
*image_view,
*kuwahara_view;
Image
*gaussian_image,
*kuwahara_image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
width;
ssize_t
y;
/*
Initialize Kuwahara image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=(size_t) radius+1;
gaussian_image=BlurImage(image,radius,sigma,exception);
if (gaussian_image == (Image *) NULL)
return((Image *) NULL);
kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
if (kuwahara_image == (Image *) NULL)
{
gaussian_image=DestroyImage(gaussian_image);
return((Image *) NULL);
}
if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
{
gaussian_image=DestroyImage(gaussian_image);
kuwahara_image=DestroyImage(kuwahara_image);
return((Image *) NULL);
}
/*
Edge preserving noise reduction filter.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(gaussian_image,exception);
kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
for (y=0; y < (ssize_t) gaussian_image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) gaussian_image->columns; x++)
{
const Quantum
*magick_restrict p;
double
min_variance;
RectangleInfo
quadrant,
target;
size_t
i;
min_variance=MagickMaximumValue;
SetGeometry(gaussian_image,&target);
quadrant.width=width;
quadrant.height=width;
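/*
  Examine the four square windows that meet at the current pixel and keep
  the one whose luma variance is smallest; the blurred value at that
  window's center then supplies the output pixel.
*/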
for (i=0; i < 4; i++)
{
const Quantum
*magick_restrict k;
double
mean[MaxPixelChannels],
variance;
ssize_t
n;
ssize_t
j;
quadrant.x=x;
quadrant.y=y;
switch (i)
{
case 0:
{
quadrant.x=x-(ssize_t) (width-1);
quadrant.y=y-(ssize_t) (width-1);
break;
}
case 1:
{
quadrant.y=y-(ssize_t) (width-1);
break;
}
case 2:
{
quadrant.x=x-(ssize_t) (width-1);
break;
}
case 3:
default:
break;
}
p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
quadrant.width,quadrant.height,exception);
if (p == (const Quantum *) NULL)
break;
for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
mean[j]=0.0;
k=p;
for (n=0; n < (ssize_t) (width*width); n++)
{
for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
mean[j]+=(double) k[j];
k+=GetPixelChannels(gaussian_image);
}
for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
mean[j]/=(double) (width*width);
k=p;
variance=0.0;
for (n=0; n < (ssize_t) (width*width); n++)
{
double
luma;
luma=GetPixelLuma(gaussian_image,k);
variance+=(luma-GetMeanLuma(gaussian_image,mean))*
(luma-GetMeanLuma(gaussian_image,mean));
k+=GetPixelChannels(gaussian_image);
}
if (variance < min_variance)
{
min_variance=variance;
target=quadrant;
}
}
if (i < 4)
{
status=MagickFalse;
break;
}
status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
target.y+target.height/2.0,q,exception);
if (status == MagickFalse)
break;
q+=GetPixelChannels(kuwahara_image);
}
if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
kuwahara_view=DestroyCacheView(kuwahara_view);
image_view=DestroyCacheView(image_view);
gaussian_image=DestroyImage(gaussian_image);
if (status == MagickFalse)
kuwahara_image=DestroyImage(kuwahara_image);
return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask; however, the mask is instead created from
% a more strongly blurred copy of the image.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"
CacheView
*image_view,
*contrast_view;
float
*interImage,
*scanline,
totalWeight;
Image
*contrast_image;
MagickBooleanType
status;
MemoryInfo
*scanline_info,
*interImage_info;
ssize_t
scanLineSize,
width;
/*
Initialize contrast image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
if (contrast_image != (Image *) NULL)
return(contrast_image);
#endif
contrast_image=CloneImage(image,0,0,MagickTrue,exception);
if (contrast_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
{
contrast_image=DestroyImage(contrast_image);
return((Image *) NULL);
}
image_view=AcquireVirtualCacheView(image,exception);
contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
width=(ssize_t) (scanLineSize*0.002f*fabs(radius));
scanLineSize+=(2*width);
scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
scanLineSize,sizeof(*scanline));
if (scanline_info == (MemoryInfo *) NULL)
{
contrast_view=DestroyCacheView(contrast_view);
image_view=DestroyCacheView(image_view);
contrast_image=DestroyImage(contrast_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
scanline=(float *) GetVirtualMemoryBlob(scanline_info);
/*
Create intermediate buffer.
*/
interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
sizeof(*interImage));
if (interImage_info == (MemoryInfo *) NULL)
{
scanline_info=RelinquishVirtualMemory(scanline_info);
contrast_view=DestroyCacheView(contrast_view);
image_view=DestroyCacheView(image_view);
contrast_image=DestroyImage(contrast_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
interImage=(float *) GetVirtualMemoryBlob(interImage_info);
totalWeight=(float) ((width+1)*(width+1));
/*
Vertical pass.
*/
status=MagickTrue;
{
ssize_t
x;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
float
*out,
*pix,
*pixels;
ssize_t
y;
ssize_t
i;
if (status == MagickFalse)
continue;
pixels=scanline;
pixels+=id*scanLineSize;
pix=pixels;
p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (y=0; y < (ssize_t) image->rows+(2*width); y++)
{
*pix++=(float) GetPixelLuma(image,p);
p+=image->number_channels;
}
out=interImage+x+width;
for (y=0; y < (ssize_t) image->rows; y++)
{
float
sum,
weight;
weight=1.0f;
sum=0;
pix=pixels+y;
for (i=0; i < width; i++)
{
sum+=weight*(*pix++);
weight+=1.0f;
}
for (i=width+1; i < (2*width); i++)
{
sum+=weight*(*pix++);
weight-=1.0f;
}
/* write to output */
*out=sum/totalWeight;
/* mirror into padding */
if (x <= width && x != 0)
*(out-(x*2))=*out;
if ((x > (ssize_t) image->columns-width-2) &&
(x != (ssize_t) image->columns-1))
*(out+((image->columns-x-1)*2))=*out;
out+=image->columns+(width*2);
}
}
}
/*
Horizontal pass.
*/
{
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
float
*pix,
*pixels;
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
i;
if (status == MagickFalse)
continue;
pixels=scanline;
pixels+=id*scanLineSize;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
(2*width))*sizeof(float));
for (x=0; x < (ssize_t) image->columns; x++)
{
float
mult,
srcVal,
sum,
weight;
PixelTrait
traits;
weight=1.0f;
sum=0;
pix=pixels+x;
for (i=0; i < width; i++)
{
sum+=weight*(*pix++);
weight+=1.0f;
}
for (i=width+1; i < (2*width); i++)
{
sum+=weight*(*pix++);
weight-=1.0f;
}
/* Apply and write */
srcVal=(float) GetPixelLuma(image,p);
mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
mult=(srcVal+mult)/srcVal;
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelRed(contrast_image,ClampToQuantum((MagickRealType)
GetPixelRed(image,p)*mult),q);
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType)
GetPixelGreen(image,p)*mult),q);
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType)
GetPixelBlue(image,p)*mult),q);
p+=image->number_channels;
q+=contrast_image->number_channels;
}
if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
status=MagickFalse;
}
}
scanline_info=RelinquishVirtualMemory(scanline_info);
interImage_info=RelinquishVirtualMemory(interImage_info);
contrast_view=DestroyCacheView(contrast_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
contrast_image=DestroyImage(contrast_image);
return(contrast_image);
}
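/*
  A usage sketch (the radius and strength percentages are illustrative
  assumptions):

    contrast_image=LocalContrastImage(image,10.0,12.5,exception);
*/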
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
const double sigma)
{
MagickRealType
*kernel,
normalize;
ssize_t
i;
/*
Generate a 1-D convolution kernel.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,sizeof(*kernel)));
if (kernel == (MagickRealType *) NULL)
return(kernel);
normalize=0.0;
for (i=0; i < (ssize_t) width; i++)
{
kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
MagickSigma)))/(MagickSQ2PI*MagickSigma));
normalize+=kernel[i];
}
for (i=0; i < (ssize_t) width; i++)
kernel[i]/=normalize;
return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"
CacheView
*blur_view,
*image_view,
*motion_view;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickRealType
*kernel;
OffsetInfo
*offset;
PointInfo
point;
ssize_t
i;
size_t
width;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
width=GetOptimalKernelWidth1D(radius,sigma);
kernel=GetMotionBlurKernel(width,sigma);
if (kernel == (MagickRealType *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
if (offset == (OffsetInfo *) NULL)
{
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
point.x=(double) width*sin(DegreesToRadians(angle));
point.y=(double) width*cos(DegreesToRadians(angle));
for (i=0; i < (ssize_t) width; i++)
{
offset[i].x=CastDoubleToLong(ceil((double) (i*point.y)/
hypot(point.x,point.y)-0.5));
offset[i].y=CastDoubleToLong(ceil((double) (i*point.x)/
hypot(point.x,point.y)-0.5));
}
/*
Motion blur image.
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
if (blur_image != (Image *) NULL)
{
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
return(blur_image);
}
#endif
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
{
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
return((Image *) NULL);
}
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
motion_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
const Quantum
*magick_restrict r;
MagickRealType
*magick_restrict k;
ssize_t
j;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[i],q);
continue;
}
k=kernel;
pixel=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
for (j=0; j < (ssize_t) width; j++)
{
r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
offset[j].y,1,1,exception);
if (r == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel+=(*k)*r[i];
k++;
}
SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
continue;
}
alpha=0.0;
gamma=0.0;
for (j=0; j < (ssize_t) width; j++)
{
r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
1,exception);
if (r == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
pixel+=(*k)*alpha*r[i];
gamma+=(*k)*alpha;
k++;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(blur_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_view=DestroyCacheView(blur_view);
motion_view=DestroyCacheView(motion_view);
image_view=DestroyCacheView(image_view);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  in pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImage method is:
%
%      Image *PreviewImage(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
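%  For example, a minimal usage sketch; GammaPreview is one illustrative
%  choice of preview operation (any PreviewType value is handled the same
%  way):
%
%      Image
%        *montage;
%
%      montage=PreviewImage(image,GammaPreview,exception);
%      if (montage == (Image *) NULL)
%        return((Image *) NULL);
%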
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"
char
factor[MagickPathExtent],
label[MagickPathExtent];
double
degrees,
gamma,
percentage,
radius,
sigma,
threshold;
Image
*images,
*montage_image,
*preview_image,
*thumbnail;
ImageInfo
*preview_info;
MagickBooleanType
proceed;
MontageInfo
*montage_info;
QuantizeInfo
quantize_info;
RectangleInfo
geometry;
ssize_t
i,
x;
size_t
colors;
ssize_t
y;
/*
Open output image file.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
colors=2;
degrees=0.0;
gamma=(-0.2f);
preview_info=AcquireImageInfo();
SetGeometry(image,&geometry);
(void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
images=NewImageList();
percentage=12.5;
GetQuantizeInfo(&quantize_info);
radius=0.0;
sigma=1.0;
threshold=0.0;
x=0;
y=0;
for (i=0; i < NumberTiles; i++)
{
thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
if (thumbnail == (Image *) NULL)
break;
(void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
(void *) NULL);
(void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
if (i == (NumberTiles/2))
{
(void) QueryColorCompliance("#dfdfdf",AllCompliance,
&thumbnail->matte_color,exception);
AppendImageToList(&images,thumbnail);
continue;
}
switch (preview)
{
case RotatePreview:
{
degrees+=45.0;
preview_image=RotateImage(thumbnail,degrees,exception);
(void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
break;
}
case ShearPreview:
{
degrees+=5.0;
preview_image=ShearImage(thumbnail,degrees,degrees,exception);
(void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
2.0*degrees);
break;
}
case RollPreview:
{
x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
preview_image=RollImage(thumbnail,x,y,exception);
(void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
(double) x,(double) y);
break;
}
case HuePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case SaturationPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case BrightnessPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
(void) ModulateImage(preview_image,factor,exception);
(void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
break;
}
case GammaPreview:
default:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
gamma+=0.4f;
(void) GammaImage(preview_image,gamma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
break;
}
case SpiffPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image != (Image *) NULL)
for (x=0; x < i; x++)
(void) ContrastImage(preview_image,MagickTrue,exception);
(void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
(double) i+1);
break;
}
case DullPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
for (x=0; x < i; x++)
(void) ContrastImage(preview_image,MagickFalse,exception);
(void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
(double) i+1);
break;
}
case GrayscalePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
colors<<=1;
quantize_info.number_colors=colors;
quantize_info.colorspace=GRAYColorspace;
(void) QuantizeImage(&quantize_info,preview_image,exception);
(void) FormatLocaleString(label,MagickPathExtent,
"-colorspace gray -colors %.20g",(double) colors);
break;
}
case QuantizePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
colors<<=1;
quantize_info.number_colors=colors;
(void) QuantizeImage(&quantize_info,preview_image,exception);
(void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
(double) colors);
break;
}
case DespecklePreview:
{
for (x=0; x < (i-1); x++)
{
preview_image=DespeckleImage(thumbnail,exception);
if (preview_image == (Image *) NULL)
break;
thumbnail=DestroyImage(thumbnail);
thumbnail=preview_image;
}
preview_image=DespeckleImage(thumbnail,exception);
if (preview_image == (Image *) NULL)
break;
(void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
(double) i+1);
break;
}
case ReduceNoisePreview:
{
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
radius,(size_t) radius,exception);
(void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
break;
}
case AddNoisePreview:
{
switch ((int) i)
{
case 0:
{
(void) CopyMagickString(factor,"uniform",MagickPathExtent);
break;
}
case 1:
{
(void) CopyMagickString(factor,"gaussian",MagickPathExtent);
break;
}
case 2:
{
(void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
break;
}
case 3:
{
(void) CopyMagickString(factor,"impulse",MagickPathExtent);
break;
}
case 5:
{
(void) CopyMagickString(factor,"laplacian",MagickPathExtent);
break;
}
case 6:
{
(void) CopyMagickString(factor,"Poisson",MagickPathExtent);
break;
}
default:
{
(void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
break;
}
}
preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
(size_t) i,exception);
(void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
break;
}
case SharpenPreview:
{
preview_image=SharpenImage(thumbnail,radius,sigma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
radius,sigma);
break;
}
case BlurPreview:
{
preview_image=BlurImage(thumbnail,radius,sigma,exception);
(void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
sigma);
break;
}
case ThresholdPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
        (void) BilevelImage(preview_image,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
(void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
(double) (percentage*((double) QuantumRange+1.0))/100.0);
break;
}
case EdgeDetectPreview:
{
preview_image=EdgeImage(thumbnail,radius,exception);
(void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
break;
}
case SpreadPreview:
{
preview_image=SpreadImage(thumbnail,image->interpolate,radius,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"spread %g",
radius+0.5);
break;
}
case SolarizePreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
(void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
100.0,exception);
(void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
(QuantumRange*percentage)/100.0);
break;
}
case ShadePreview:
{
degrees+=10.0;
preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
degrees);
break;
}
case RaisePreview:
{
RectangleInfo
raise;
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
raise.width=(size_t) (2*i+2);
raise.height=(size_t) (2*i+2);
raise.x=(i-1)/2;
raise.y=(i-1)/2;
(void) RaiseImage(preview_image,&raise,MagickTrue,exception);
(void) FormatLocaleString(label,MagickPathExtent,
"raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
raise.height,(double) raise.x,(double) raise.y);
break;
}
case SegmentPreview:
{
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
threshold+=0.4f;
(void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
threshold,exception);
(void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
threshold,threshold);
break;
}
case SwirlPreview:
{
preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
degrees+=45.0;
break;
}
case ImplodePreview:
{
degrees+=0.1f;
preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
break;
}
case WavePreview:
{
degrees+=5.0f;
preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
image->interpolate,exception);
(void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
degrees,2.0*degrees);
break;
}
case OilPaintPreview:
{
preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
radius,sigma);
break;
}
case CharcoalDrawingPreview:
{
preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
exception);
(void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
radius,sigma);
break;
}
case JPEGPreview:
{
char
filename[MagickPathExtent];
int
file;
MagickBooleanType
status;
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
if (preview_image == (Image *) NULL)
break;
preview_info->quality=(size_t) percentage;
(void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
preview_info->quality);
file=AcquireUniqueFileResource(filename);
if (file != -1)
file=close(file)-1;
(void) FormatLocaleString(preview_image->filename,MagickPathExtent,
"jpeg:%s",filename);
status=WriteImage(preview_info,preview_image,exception);
if (status != MagickFalse)
{
Image
*quality_image;
(void) CopyMagickString(preview_info->filename,
preview_image->filename,MagickPathExtent);
quality_image=ReadImage(preview_info,exception);
if (quality_image != (Image *) NULL)
{
preview_image=DestroyImage(preview_image);
preview_image=quality_image;
}
}
(void) RelinquishUniqueFileResource(preview_image->filename);
if ((GetBlobSize(preview_image)/1024) >= 1024)
(void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
1024.0/1024.0);
else
if (GetBlobSize(preview_image) >= 1024)
(void) FormatLocaleString(label,MagickPathExtent,
"quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
GetBlobSize(preview_image))/1024.0);
else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image)));
break;
}
}
thumbnail=DestroyImage(thumbnail);
percentage+=12.5;
radius+=0.5;
sigma+=0.25;
if (preview_image == (Image *) NULL)
break;
preview_image->alpha_trait=UndefinedPixelTrait;
(void) DeleteImageProperty(preview_image,"label");
(void) SetImageProperty(preview_image,"label",label,exception);
AppendImageToList(&images,preview_image);
proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
NumberTiles);
if (proceed == MagickFalse)
break;
}
if (images == (Image *) NULL)
{
preview_info=DestroyImageInfo(preview_info);
return((Image *) NULL);
}
/*
Create the montage.
*/
montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
(void) CopyMagickString(montage_info->filename,image->filename,
MagickPathExtent);
montage_info->shadow=MagickTrue;
(void) CloneString(&montage_info->tile,"3x3");
(void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
(void) CloneString(&montage_info->frame,DefaultTileFrame);
montage_image=MontageImages(images,montage_info,exception);
montage_info=DestroyMontageInfo(montage_info);
images=DestroyImageList(images);
if (montage_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
if (montage_image->montage != (char *) NULL)
{
/*
Free image directory.
*/
montage_image->montage=(char *) RelinquishMagickMemory(
montage_image->montage);
if (image->directory != (char *) NULL)
montage_image->directory=(char *) RelinquishMagickMemory(
montage_image->directory);
}
preview_info=DestroyImageInfo(preview_info);
return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a radial blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o angle: the angle of the radial blur.
%
% o exception: return any errors or warnings in this structure.
%
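%  For example, a minimal usage sketch; the angle of 10.0 degrees is an
%  illustrative value, not taken from this file:
%
%      Image
%        *blur_image;
%
%      blur_image=RotationalBlurImage(image,10.0,exception);
%      if (blur_image == (Image *) NULL)
%        return((Image *) NULL);
%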
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
ExceptionInfo *exception)
{
CacheView
*blur_view,
*image_view,
*radial_view;
double
blur_radius,
*cos_theta,
offset,
*sin_theta,
theta;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
blur_center;
ssize_t
i;
size_t
n;
ssize_t
y;
/*
Allocate blur image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
blur_image=AccelerateRotationalBlurImage(image,angle,exception);
if (blur_image != (Image *) NULL)
return(blur_image);
#endif
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
blur_center.x=(double) (image->columns-1)/2.0;
blur_center.y=(double) (image->rows-1)/2.0;
blur_radius=hypot(blur_center.x,blur_center.y);
n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
theta=DegreesToRadians(angle)/(double) (n-1);
cos_theta=(double *) AcquireQuantumMemory((size_t) n,
sizeof(*cos_theta));
sin_theta=(double *) AcquireQuantumMemory((size_t) n,
sizeof(*sin_theta));
if ((cos_theta == (double *) NULL) ||
(sin_theta == (double *) NULL))
{
if (cos_theta != (double *) NULL)
cos_theta=(double *) RelinquishMagickMemory(cos_theta);
if (sin_theta != (double *) NULL)
sin_theta=(double *) RelinquishMagickMemory(sin_theta);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
offset=theta*(double) (n-1)/2.0;
for (i=0; i < (ssize_t) n; i++)
{
cos_theta[i]=cos((double) (theta*i-offset));
sin_theta[i]=sin((double) (theta*i-offset));
}
/*
Radial blur image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
radial_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
radius;
PointInfo
center;
ssize_t
i;
size_t
step;
center.x=(double) x-blur_center.x;
center.y=(double) y-blur_center.y;
radius=hypot((double) center.x,center.y);
if (radius == 0)
step=1;
else
{
step=(size_t) (blur_radius/radius);
if (step == 0)
step=1;
else
if (step >= n)
step=n-1;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
const Quantum
*magick_restrict r;
ssize_t
j;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[i],q);
continue;
}
gamma=0.0;
pixel=0.0;
if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) ||
(channel == AlphaPixelChannel))
{
for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
{
r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
(blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
1,1,exception);
if (r == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel+=r[i];
gamma++;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
{
double
alpha;
r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
(blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
1,1,exception);
if (r == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
alpha=(double) QuantumScale*GetPixelAlpha(image,r);
pixel+=alpha*r[i];
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(blur_image);
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_view=DestroyCacheView(blur_view);
radial_view=DestroyCacheView(radial_view);
image_view=DestroyCacheView(image_view);
cos_theta=(double *) RelinquishMagickMemory(cos_theta);
sin_theta=(double *) RelinquishMagickMemory(sin_theta);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blurs pixels within a contrast threshold.
%  It is similar to the unsharp mask, which sharpens everything with contrast
%  above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
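%  For example, a minimal usage sketch; the radius of 0.0, the sigma of 1.0,
%  and the contrast threshold of 10% of QuantumRange are illustrative values:
%
%      Image
%        *blur_image;
%
%      blur_image=SelectiveBlurImage(image,0.0,1.0,0.10*(double) QuantumRange,
%        exception);
%      if (blur_image == (Image *) NULL)
%        return((Image *) NULL);
%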
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"
CacheView
*blur_view,
*image_view,
*luminance_view;
Image
*blur_image,
*luminance_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickRealType
*kernel;
ssize_t
i;
size_t
width;
ssize_t
center,
j,
u,
v,
y;
/*
Initialize blur image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth1D(radius,sigma);
kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
width,width*sizeof(*kernel)));
if (kernel == (MagickRealType *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
j=(ssize_t) (width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
}
if (image->debug != MagickFalse)
{
char
format[MagickPathExtent],
*message;
const MagickRealType
*k;
ssize_t
u,
v;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
" SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
width);
message=AcquireString("");
k=kernel;
for (v=0; v < (ssize_t) width; v++)
{
*message='\0';
(void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
(void) ConcatenateString(&message,format);
for (u=0; u < (ssize_t) width; u++)
{
(void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
*k++);
(void) ConcatenateString(&message,format);
}
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
}
message=DestroyString(message);
}
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
{
blur_image=DestroyImage(blur_image);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
return((Image *) NULL);
}
luminance_image=CloneImage(image,0,0,MagickTrue,exception);
if (luminance_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
return((Image *) NULL);
}
status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
if (status == MagickFalse)
{
luminance_image=DestroyImage(luminance_image);
blur_image=DestroyImage(blur_image);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
return((Image *) NULL);
}
/*
Threshold blur image.
*/
status=MagickTrue;
progress=0;
center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
image_view=AcquireVirtualCacheView(image,exception);
luminance_view=AcquireVirtualCacheView(luminance_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
contrast;
MagickBooleanType
sync;
const Quantum
*magick_restrict l,
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
((width-1)/2L),image->columns+width,width,exception);
l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
(ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
intensity;
ssize_t
i;
intensity=GetPixelIntensity(image,p+center);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
blur_traits,
traits;
const MagickRealType
*magick_restrict k;
const Quantum
*magick_restrict luminance_pixels,
*magick_restrict pixels;
ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
blur_traits=GetPixelChannelTraits(blur_image,channel);
if ((traits == UndefinedPixelTrait) ||
(blur_traits == UndefinedPixelTrait))
continue;
if ((blur_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
k=kernel;
pixel=0.0;
pixels=p;
luminance_pixels=l;
gamma=0.0;
if ((blur_traits & BlendPixelTrait) == 0)
{
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
intensity;
if (fabs(contrast) < threshold)
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
}
k++;
pixels+=GetPixelChannels(image);
luminance_pixels+=GetPixelChannels(luminance_image);
}
pixels+=GetPixelChannels(image)*image->columns;
luminance_pixels+=GetPixelChannels(luminance_image)*
luminance_image->columns;
}
if (fabs((double) gamma) < MagickEpsilon)
{
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
continue;
}
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
contrast=GetPixelIntensity(image,pixels)-intensity;
if (fabs(contrast) < threshold)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=(*k)*alpha*pixels[i];
gamma+=(*k)*alpha;
}
k++;
pixels+=GetPixelChannels(image);
luminance_pixels+=GetPixelChannels(luminance_image);
}
pixels+=GetPixelChannels(image)*image->columns;
luminance_pixels+=GetPixelChannels(luminance_image)*
luminance_image->columns;
}
if (fabs((double) gamma) < MagickEpsilon)
{
SetPixelChannel(blur_image,channel,p[center+i],q);
continue;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
l+=GetPixelChannels(luminance_image);
q+=GetPixelChannels(blur_image);
}
sync=SyncCacheViewAuthenticPixels(blur_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
luminance_view=DestroyCacheView(luminance_view);
image_view=DestroyCacheView(image_view);
luminance_image=DestroyImage(luminance_image);
kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in degrees above the x-y plane.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
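%  For example, a minimal usage sketch; the 30.0 degree azimuth and elevation
%  are illustrative values, and MagickTrue shades the pixel intensity:
%
%      Image
%        *shade_image;
%
%      shade_image=ShadeImage(image,MagickTrue,30.0,30.0,exception);
%      if (shade_image == (Image *) NULL)
%        return((Image *) NULL);
%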
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag "Shade/Image"
CacheView
*image_view,
*shade_view;
Image
*linear_image,
*shade_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
light;
ssize_t
y;
/*
Initialize shaded image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
linear_image=CloneImage(image,0,0,MagickTrue,exception);
shade_image=CloneImage(image,0,0,MagickTrue,exception);
if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
{
if (linear_image != (Image *) NULL)
linear_image=DestroyImage(linear_image);
if (shade_image != (Image *) NULL)
shade_image=DestroyImage(shade_image);
return((Image *) NULL);
}
if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
{
linear_image=DestroyImage(linear_image);
shade_image=DestroyImage(shade_image);
return((Image *) NULL);
}
/*
Compute the light vector.
*/
light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
cos(DegreesToRadians(elevation));
light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
cos(DegreesToRadians(elevation));
light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
/*
Shade image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(linear_image,exception);
shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
for (y=0; y < (ssize_t) linear_image->rows; y++)
{
double
distance,
normal_distance,
shade;
PrimaryInfo
normal;
const Quantum
*magick_restrict center,
*magick_restrict p,
*magick_restrict post,
*magick_restrict pre;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
exception);
q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/*
Shade this row of pixels.
*/
normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */
for (x=0; x < (ssize_t) linear_image->columns; x++)
{
ssize_t
i;
/*
Determine the surface normal and compute shading.
*/
pre=p+GetPixelChannels(linear_image);
center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
normal.x=(double) (
GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
normal.y=(double) (
GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
GetShadeIntensity(linear_image,post)+
GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
GetShadeIntensity(linear_image,pre)-
GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
if ((fabs(normal.x) <= MagickEpsilon) &&
(fabs(normal.y) <= MagickEpsilon))
shade=light.z;
else
{
shade=0.0;
distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
if (distance > MagickEpsilon)
{
normal_distance=normal.x*normal.x+normal.y*normal.y+
normal.z*normal.z;
if (normal_distance > (MagickEpsilon*MagickEpsilon))
shade=distance/sqrt((double) normal_distance);
}
}
for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
{
PixelChannel
channel;
PixelTrait
shade_traits,
traits;
channel=GetPixelChannelChannel(linear_image,i);
traits=GetPixelChannelTraits(linear_image,channel);
shade_traits=GetPixelChannelTraits(shade_image,channel);
if ((traits == UndefinedPixelTrait) ||
(shade_traits == UndefinedPixelTrait))
continue;
if ((shade_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(shade_image,channel,center[i],q);
continue;
}
if ((traits & UpdatePixelTrait) == 0)
{
SetPixelChannel(shade_image,channel,center[i],q);
continue;
}
if (gray != MagickFalse)
{
SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
continue;
}
SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
center[i]),q);
}
p+=GetPixelChannels(linear_image);
q+=GetPixelChannels(shade_image);
}
if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
shade_view=DestroyCacheView(shade_view);
image_view=DestroyCacheView(image_view);
linear_image=DestroyImage(linear_image);
if (status == MagickFalse)
shade_image=DestroyImage(shade_image);
return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel, often producing undesirable ringing in
%  the filtered result; this can be avoided by using a 2D Gaussian-shaped
%  image sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
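%  For example, a minimal usage sketch; the sigma of 1.0 is an illustrative
%  value, and the radius of 0.0 lets SharpenImage() select the kernel width:
%
%      Image
%        *sharp_image;
%
%      sharp_image=SharpenImage(image,0.0,1.0,exception);
%      if (sharp_image == (Image *) NULL)
%        return((Image *) NULL);
%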
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
double
gamma,
normalize;
Image
*sharp_image;
KernelInfo
*kernel_info;
ssize_t
i;
size_t
width;
ssize_t
j,
u,
v;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth2D(radius,sigma);
kernel_info=AcquireKernelInfo((const char *) NULL,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(kernel_info,0,sizeof(*kernel_info));
kernel_info->width=width;
kernel_info->height=width;
kernel_info->x=(ssize_t) (width-1)/2;
kernel_info->y=(ssize_t) (width-1)/2;
kernel_info->signature=MagickCoreSignature;
kernel_info->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel_info->width,kernel_info->height*
sizeof(*kernel_info->values)));
if (kernel_info->values == (MagickRealType *) NULL)
{
kernel_info=DestroyKernelInfo(kernel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
normalize=0.0;
j=(ssize_t) (kernel_info->width-1)/2;
i=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel_info->values[i];
i++;
}
}
kernel_info->values[i/2]=(double) ((-2.0)*normalize);
normalize=0.0;
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
normalize+=kernel_info->values[i];
gamma=PerceptibleReciprocal(normalize);
for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
kernel_info->values[i]*=gamma;
sharp_image=ConvolveImage(image,kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a square area defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,
% const PixelInterpolateMethod method,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o method: interpolation method.
%
% o radius: choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
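%  For example, a minimal usage sketch; the radius of 3.0 is an illustrative
%  value, and the image's own interpolation method is reused:
%
%      Image
%        *spread_image;
%
%      spread_image=SpreadImage(image,image->interpolate,3.0,exception);
%      if (spread_image == (Image *) NULL)
%        return((Image *) NULL);
%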
*/
MagickExport Image *SpreadImage(const Image *image,
const PixelInterpolateMethod method,const double radius,
ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"
CacheView
*image_view,
*spread_view;
Image
*spread_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
size_t
width;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Initialize spread image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
spread_image=CloneImage(image,0,0,MagickTrue,exception);
if (spread_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
{
spread_image=DestroyImage(spread_image);
return((Image *) NULL);
}
/*
Spread image.
*/
status=MagickTrue;
progress=0;
width=GetOptimalKernelWidth1D(radius,0.5);
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireVirtualCacheView(image,exception);
spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PointInfo
point;
point.x=GetPseudoRandomValue(random_info[id]);
point.y=GetPseudoRandomValue(random_info[id]);
status=InterpolatePixelChannels(image,image_view,spread_image,method,
(double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
exception);
if (status == MagickFalse)
break;
q+=GetPixelChannels(spread_image);
}
if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
spread_view=DestroyCacheView(spread_view);
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
spread_image=DestroyImage(spread_image);
return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double gain,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
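%  For example, a minimal usage sketch; the sigma of 1.0, the gain of 1.0
%  (add 100% of the difference back), and the threshold of 0.05 are
%  illustrative values, and the radius of 0.0 lets the underlying blur
%  select its kernel width:
%
%      Image
%        *unsharp_image;
%
%      unsharp_image=UnsharpMaskImage(image,0.0,1.0,1.0,0.05,exception);
%      if (unsharp_image == (Image *) NULL)
%        return((Image *) NULL);
%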
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
const double sigma,const double gain,const double threshold,
ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"
CacheView
*image_view,
*unsharp_view;
Image
*unsharp_image;
MagickBooleanType
status;
MagickOffsetType
progress;
double
quantum_threshold;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
/* This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
exception);
if (unsharp_image != (Image *) NULL)
return(unsharp_image);
#endif
*/
unsharp_image=BlurImage(image,radius,sigma,exception);
if (unsharp_image == (Image *) NULL)
return((Image *) NULL);
quantum_threshold=(double) QuantumRange*threshold;
/*
Unsharp-mask image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,unsharp_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits,
unsharp_traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
if ((traits == UndefinedPixelTrait) ||
(unsharp_traits == UndefinedPixelTrait))
continue;
if ((unsharp_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(unsharp_image,channel,p[i],q);
continue;
}
pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
if (fabs(2.0*pixel) < quantum_threshold)
pixel=(double) p[i];
else
pixel=(double) p[i]+gain*pixel;
SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(unsharp_image);
}
if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
unsharp_image->type=image->type;
unsharp_view=DestroyCacheView(unsharp_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
unsharp_image=DestroyImage(unsharp_image);
return(unsharp_image);
}
|
clonedetector.h | #if ! defined CLONEDETECTOR_H
#define CLONEDETECTOR_H
#include <cassert>
#include <vector>
#include <map>
#include "../common/hash_map_includer.h"
#include <algorithm>
#include <limits>
#include <iterator>
#include <boost/cstdint.hpp>
#include <boost/array.hpp>
#include <boost/thread.hpp>
#include "../ccfx/ccfxcommon.h"
#include "../threadqueue/threadqueue.h"
#if defined _MSC_VER
#undef max
#undef min
#endif
template<typename ElemType, typename HashValueType>
class CloneDetector {
private:
class SubSequence {
private:
size_t begin;
size_t end;
public:
SubSequence(size_t beginPos_, size_t endPos_)
: begin(beginPos_), end(endPos_)
{
assert(beginPos_ <= endPos_);
}
SubSequence()
: begin(), end()
{
}
SubSequence(const SubSequence &right)
: begin(right.begin), end(right.end)
{
}
public:
//bool operator==(const SubSequence &right) const
//{
// if (end - begin != right.end - right.begin) {
// return false;
// }
// const std:: vector<ElemType> &seq = *pSeq;
// size_t li = begin;
// size_t ri = right.begin;
// while (li != end) {
// ElemType lt = to_compared(seq, li, begin);
// ElemType rt = to_compared(*right.pSeq, ri, right.begin);
// if (lt != rt) {
// return false;
// }
// ++li;
// ++ri;
// }
// return true;
//}
//bool operator<(const SubSequence &right) const
//{
// const std:: vector<ElemType> &seq = *pSeq;
// size_t li = begin;
// size_t ri = right.begin;
// while (li < end && ri < right.end) {
// ElemType lt = to_compared(seq, li, begin);
// ElemType rt = to_compared(*right.pSeq, ri, right.begin);
// if (lt != rt) {
// break; // while
// }
// ++li;
// ++ri;
// }
//
// if (li < end) {
// if (ri < right.end) {
// ElemType lt = to_compared(seq, li, begin);
// ElemType rt = to_compared(*right.pSeq, ri, right.begin);
// if (lt < rt) {
// return true;
// }
// else {
// return false;
// }
// }
// else {
// assert(ri == right.end);
// return false;
// }
// }
// else {
// assert(li == end);
// if (ri < right.end) {
// return true;
// }
// else {
// assert(ri == right.end);
// return false;
// }
// }
//}
//const ElemType &operator[](size_t index) const
//{
// return *(li + index);
//}
void swap(SubSequence &right)
{
std:: swap(this->begin, right.begin);
std:: swap(this->end, right.end);
}
inline size_t size() const
{
return end - begin;
}
inline size_t getBegin() const
{
return this->begin;
}
inline size_t getEnd() const
{
return this->end;
}
public:
class SequencePrevComparator {
private:
size_t unitLength;
const typename std:: vector<ElemType> *pSeq;
public:
SequencePrevComparator(size_t unitLength_, const std:: vector<ElemType> *pSeq_)
: unitLength(unitLength_), pSeq(pSeq_)
{
}
SequencePrevComparator()
: unitLength(0), pSeq(NULL)
{
}
SequencePrevComparator(const SequencePrevComparator &right)
: unitLength(right.unitLength), pSeq(right.pSeq)
{
}
bool operator()(size_t posLeft, size_t posRight) const
{
assert(posLeft + unitLength <= (*pSeq).size());
assert(posRight + unitLength <= (*pSeq).size());
size_t i;
for (i = 0; i < unitLength; ++i) {
const ElemType &li = to_compared(*pSeq, posLeft + i, posLeft);
const ElemType &ri = to_compared(*pSeq, posRight + i, posRight);
assert(li != 0);
assert(ri != 0);
if (li != ri) {
break; // for i
}
}
if (i != unitLength) {
const ElemType &li = to_compared(*pSeq, posLeft + i, posLeft);
const ElemType &ri = to_compared(*pSeq, posRight + i, posRight);
return li < ri;
}
else {
const ElemType &lp = to_reversereference_compared(*pSeq, posLeft - 1, posLeft, posLeft + unitLength);
const ElemType &rp = to_reversereference_compared(*pSeq, posRight - 1, posRight, posRight + unitLength);
return lp < rp;
}
}
};
class ExtensionPrevComparator {
private:
size_t baseLength;
const typename std:: vector<ElemType> *pSeq;
public:
ExtensionPrevComparator(size_t baseLength_, const std:: vector<ElemType> *pSeq_)
: baseLength(baseLength_), pSeq(pSeq_)
{
}
ExtensionPrevComparator()
: baseLength(0), pSeq(NULL)
{
}
ExtensionPrevComparator(const ExtensionPrevComparator &right)
: baseLength(right.baseLength), pSeq(right.pSeq)
{
}
bool operator()(size_t posLeft, size_t posRight) const
{
assert(posLeft + baseLength < (*pSeq).size());
assert(posRight + baseLength < (*pSeq).size());
const ElemType &li = to_compared(*pSeq, posLeft + baseLength, posLeft);
const ElemType &ri = to_compared(*pSeq, posRight + baseLength, posRight);
if (li == ri) {
const ElemType &lp = to_reversereference_compared(*pSeq, posLeft - 1, posLeft, posLeft + baseLength);
const ElemType &rp = to_reversereference_compared(*pSeq, posRight - 1, posRight, posRight + baseLength);
return lp < rp;
}
else {
return li < ri;
}
}
};
class PrevExtensionComparator {
private:
size_t baseLength;
const typename std:: vector<ElemType> *pSeq;
public:
PrevExtensionComparator(size_t baseLength_, const std:: vector<ElemType> *pSeq_)
: baseLength(baseLength_), pSeq(pSeq_)
{
}
PrevExtensionComparator()
: baseLength(0), pSeq(NULL)
{
}
		PrevExtensionComparator(const PrevExtensionComparator &right)
: baseLength(right.baseLength), pSeq(right.pSeq)
{
}
bool operator()(size_t posLeft, size_t posRight) const
{
const ElemType &lp = to_reversereference_compared(*pSeq, posLeft - 1, posLeft, posLeft + baseLength);
const ElemType &rp = to_reversereference_compared(*pSeq, posRight - 1, posRight, posRight + baseLength);
if (lp == rp) {
assert(posLeft + baseLength < (*pSeq).size());
assert(posRight + baseLength < (*pSeq).size());
const ElemType &li = to_compared(*pSeq, posLeft + baseLength, posLeft);
const ElemType &ri = to_compared(*pSeq, posRight + baseLength, posRight);
return li < ri;
}
else {
return lp < rp;
}
}
};
};
static bool subsequenceEqual(const typename std:: vector<ElemType> *pSeq, const SubSequence &left, const SubSequence &right)
{
if (left.size() != right.size()) {
return false;
}
size_t li = left.getBegin();
size_t ri = right.getBegin();
while (li != left.getEnd()) {
ElemType lt = to_compared(*pSeq, li, left.getBegin());
ElemType rt = to_compared(*pSeq, ri, right.getBegin());
if (lt != rt) {
return false;
}
++li;
++ri;
}
return true;
}
public:
class SequenceHashFunction {
public:
virtual ~SequenceHashFunction()
{
}
virtual HashValueType operator()(const typename std:: vector<ElemType> &seq, size_t begin, size_t end) = 0;
};
public:
struct CloneSetItem {
public:
ElemType prev;
ElemType extension;
std::vector<size_t/* pos */> poss;
public:
CloneSetItem(const CloneSetItem &right)
: prev(right.prev), extension(right.extension), poss(right.poss)
{
}
CloneSetItem()
: prev(0), extension(0), poss()
{
}
};
class CloneSetListener {
private:
const std:: vector<ElemType> *pSeq;
size_t unitLength;
public:
virtual ~CloneSetListener()
{
}
CloneSetListener()
: pSeq(NULL), unitLength(0)
{
}
CloneSetListener(const CloneSetListener &right)
: pSeq(right.pSeq), unitLength(right.unitLength)
{
}
public:
virtual void attachSeq(const std:: vector<ElemType> *pSeq_)
{
pSeq = pSeq_;
const std:: vector<ElemType> &seq = *pSeq;
if (! seq.empty()) {
assert(seq[0] == 0);
assert(seq.back() == 0);
}
}
virtual void setUnitLength(size_t unitLength_)
{
unitLength = unitLength_;
}
public:
virtual bool rangeCheck(const std:: vector<CloneSetItem>& UNUSED(cloneSet))
{
return true;
}
virtual bool codeCheck(size_t UNUSED(pos), size_t UNUSED(length))
{
return true;
}
virtual void found(
const std:: vector<CloneSetItem>& UNUSED(cloneSet),
size_t UNUSED(baseLength),
boost::uint64_t UNUSED(cloneSetReferenceNumber))
{
}
protected:
const std:: vector<ElemType> &refSeq() const
{
return *pSeq;
}
size_t getUnitLength() const
{
return unitLength;
}
};
class ClonePairListener {
private:
const typename std:: vector<ElemType> *pSeq;
size_t unitLength;
public:
virtual ~ClonePairListener()
{
}
ClonePairListener()
: pSeq(NULL), unitLength(0)
{
}
	ClonePairListener(const ClonePairListener &right)
: pSeq(right.pSeq), unitLength(right.unitLength)
{
}
public:
virtual void attachSeq(const std:: vector<ElemType> *pSeq_)
{
pSeq = pSeq_;
}
virtual void setUnitLength(size_t unitLength_)
{
unitLength = unitLength_;
}
public:
virtual bool codeCheck(size_t UNUSED(pos), size_t UNUSED(length))
{
return true;
}
virtual bool rangeCheck(const std:: vector<CloneSetItem>& UNUSED(cloneSet))
{
return true;
}
virtual void found(
size_t pos1,
size_t pos2,
size_t UNUSED(baseLength),
boost::uint64_t UNUSED(cloneSetReferenceNumber))
{
assert(pos1 < pos2);
}
protected:
const typename std:: vector<ElemType> &refSeq() const
{
return *pSeq;
}
size_t getUnitLength() const
{
return unitLength;
}
};
class ClonePairListenerWithScope : public ClonePairListener {
private:
size_t barrior;
enum mode_t { mode_all, mode_left_and_cross, mode_cross } mode;
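// mode_all reports every pair; mode_left_and_cross requires at least one
// position of the pair to lie left of barrior; mode_cross requires the
// pair to straddle barrior.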
public:
virtual ~ClonePairListenerWithScope()
{
}
ClonePairListenerWithScope()
: ClonePairListener(), barrior(0), mode(mode_all)
{
}
ClonePairListenerWithScope(const ClonePairListenerWithScope &right)
: ClonePairListener(right), barrior(right.barrior), mode(right.mode)
{
}
public:
void setAllMode()
{
mode = mode_all;
}
void setCrossMode(size_t barrior_)
{
mode = mode_cross;
barrior = barrior_;
}
void setLeftAndCrossMode(size_t barrior_)
{
mode = mode_left_and_cross;
barrior = barrior_;
}
public:
virtual bool rangeCheck(const std:: vector<CloneSetItem> &cloneSet)
{
switch (mode) {
case mode_all:
{
return true;
}
break;
case mode_left_and_cross:
{
for (size_t i = 0; i < cloneSet.size(); ++i) {
const std::vector<size_t> &poss = cloneSet[i].poss;
for (std:: vector<size_t/* pos */>::const_iterator j = poss.begin(); j != poss.end(); ++j) {
if (*j < barrior) {
return true;
}
}
}
}
break;
case mode_cross:
{
bool leftFound = false;
bool rightFound = false;
for (size_t i = 0; i < cloneSet.size(); ++i) {
const std::vector<size_t> &poss = cloneSet[i].poss;
for (std:: vector<size_t/* pos */>::const_iterator j = poss.begin(); j != poss.end(); ++j) {
if (*j < barrior) {
leftFound = true;
}
else {
rightFound = true;
}
if (leftFound && rightFound) {
return true;
}
}
}
}
break;
default:
assert(false);
break;
}
return false;
}
virtual void found(size_t posA, size_t posB, size_t baseLength, boost::uint64_t cloneSetReferenceNumber)
{
assert(posA < posB);
switch (mode) {
case mode_all:
break;
case mode_left_and_cross:
if (posA >= barrior && posB >= barrior) {
return;
}
break;
case mode_cross:
if (
((posA >= barrior) && (posB >= barrior)) ||
((posA < barrior) && (posB < barrior))
)
{
return;
}
break;
default:
assert(false);
break;
}
found_scoped(posA, posB, baseLength, cloneSetReferenceNumber);
}
protected:
virtual void found_scoped(
size_t pos1,
size_t pos2,
size_t UNUSED(baseLength),
boost::uint64_t UNUSED(cloneSetReferenceNumber))
{
assert(pos1 < pos2);
}
};
private:
class ClonePairListenerAdapter : public CloneSetListener {
private:
ClonePairListener *pListener;
public:
ClonePairListenerAdapter(ClonePairListener *pRight)
: pListener(pRight)
{
}
public:
virtual void attachSeq(const typename std:: vector<ElemType> *pSeq_)
{
CloneSetListener::attachSeq(pSeq_);
(*pListener).attachSeq(pSeq_);
}
virtual void setUnitLength(size_t unitLength_)
{
CloneSetListener::setUnitLength(unitLength_);
(*pListener).setUnitLength(unitLength_);
}
public:
virtual bool codeCheck(size_t pos, size_t length)
{
return (*pListener).codeCheck(pos, length);
}
virtual bool rangeCheck(const std:: vector<CloneSetItem> &cloneSet)
{
return (*pListener).rangeCheck(cloneSet);
}
virtual void found(const std:: vector<CloneSetItem> &cloneSet, size_t baseLength,
boost::uint64_t cloneSetReferenceNumber)
{
//const typename std:: vector<ElemType> &seq = refSeq();
//size_t unitLength = getUnitLength(); //unused variable
for (size_t csi = 0; csi < cloneSet.size(); ++csi) {
const CloneSetItem &cs = cloneSet[csi];
for (size_t csj = csi; csj < cloneSet.size(); ++csj) { // 2008/02/13
const CloneSetItem &right = cloneSet[csj];
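// Emit a pair only when it is maximal: the two groups differ in their
// preceding element (or touch the 0 delimiter) and in their extension,
// so no longer clone subsumes the pair.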
if ((cs.prev == 0 || cs.prev != right.prev) && (cs.extension == 0 || cs.extension != right.extension)) {
const std::vector<size_t> &poss = cs.poss;
for (std:: vector<size_t/* pos */>::const_iterator a = poss.begin(); a != poss.end(); ++a) {
const std::vector<size_t> &possRight = right.poss;
for (std:: vector<size_t/* pos */>::const_iterator b = (&cs == &right) ? a + 1 : possRight.begin(); b != possRight.end(); ++b) {
size_t posA = *a;
size_t posB = *b;
assert(posA != posB);
if (posA < posB) {
(*pListener).found(posA, posB, baseLength, cloneSetReferenceNumber);
}
else {
(*pListener).found(posB, posA, baseLength, cloneSetReferenceNumber);
}
}
}
}
}
}
}
};
private:
const typename std:: vector<ElemType> *pSeq;
size_t bottomUnitLength;
size_t multiply;
std:: vector<HashValueType> hashSeq;
//bool optionVerbose;
boost::uint64_t cloneSetReferenceNumber;
size_t numThreads;
public:
CloneDetector()
: pSeq(NULL), bottomUnitLength(0), multiply(1), hashSeq()/*, optionVerbose(false)*/, cloneSetReferenceNumber(0), numThreads(1)
{
}
CloneDetector(const CloneDetector &right)
: pSeq(right.pSeq), bottomUnitLength(right.bottomUnitLength), multiply(right.multiply), hashSeq(right.hashSeq)/*, optionVerbose(right.optionVerbose)*/, cloneSetReferenceNumber(right.cloneSetReferenceNumber), numThreads(1)
{
}
private:
CloneDetector(size_t dummy) // dummy to ensure methods
{
std::vector<ElemType> seqDummy;
size_t pos = 0;
size_t begin = 0;
size_t end = 0;
ElemType t1 = to_compared(seqDummy, pos, begin); // pass the vector itself, as at every other call site
ElemType t2 = to_reversereference_compared(seqDummy, pos, begin, end);
(void)t1; (void)t2;
assert(false);
}
public:
void setThreads(size_t numThreads_)
{
numThreads = numThreads_;
}
void attachSequence(const std:: vector<ElemType> *pSeq_)
{
assert(pSeq_ != NULL);
pSeq = pSeq_;
}
const typename std:: vector<ElemType> &refSeq() const
{
return *pSeq;
}
void detachSequence()
{
pSeq = NULL;
}
void setBottomUnitLength(size_t bottomUnitLength_)
{
bottomUnitLength = bottomUnitLength_;
}
void setMultiply(size_t multiply_)
{
multiply = multiply_;
}
size_t getUnitLength() const
{
return bottomUnitLength * multiply;
}
// void setOptionVerbose(bool ov)
// {
// optionVerbose = ov;
// }
// bool getOptionVerbose() const
// {
// return optionVerbose;
// }
void clearCloneSetReferenceNumber()
{
cloneSetReferenceNumber = 0;
}
void findClonePair(ClonePairListener *pListener, SequenceHashFunction &hashFunc)
{
ClonePairListenerAdapter a(pListener);
findCloneSet(&a, hashFunc);
}
public:
void print_seq(size_t beginPos, size_t len)
{
const std:: vector<ElemType> &seq = *pSeq;
size_t endPos = beginPos + len;
size_t count = 0;
for (size_t i = beginPos; i < endPos; ++i) {
if (count > 0) {
std::cout << " ";
}
std::cout << (int)(seq[i]);
++count;
if (count == 10) {
std::cout << std::endl;
count = 0;
}
}
}
private:
struct CloneSetData {
std::vector<CloneSetItem> cloneSet;
size_t baseLength;
};
void send_clone_set_data_to_listener(ThreadQueue<std::vector<std::vector<CloneSetData> > *> *pQue, CloneSetListener *pListener) {
std::vector<std::vector<CloneSetData> > *pFoundCloneSetsForThreads;
while ((pFoundCloneSetsForThreads = (*pQue).pop()) != NULL) {
std::vector<std::vector<CloneSetData> > &foundCloneSetsForThreads = *pFoundCloneSetsForThreads;
for (size_t cii = 0; cii < foundCloneSetsForThreads.size(); ++cii) {
std::vector<CloneSetData> &foundCloneSets = foundCloneSetsForThreads[cii];
for (size_t csi = 0; csi < foundCloneSets.size(); ++csi) {
++cloneSetReferenceNumber;
const CloneSetData &cloneSetData = foundCloneSets[csi];
(*pListener).found(cloneSetData.cloneSet, cloneSetData.baseLength, cloneSetReferenceNumber);
}
}
delete pFoundCloneSetsForThreads;
}
}
public:
void findCloneSet(CloneSetListener *pListener, SequenceHashFunction &hashFunc)
{
const std:: vector<ElemType> &seq = *pSeq;
const size_t unitLength = getUnitLength();
//if (optionVerbose) {
// std:: cerr << "> finding identical substrings" << std:: endl;
//}
calc_hash_seq(hashFunc);
//std::vector<HashValueType> hashSeqCopy = hashSeq;
//calc_hash_seq_prev_version(hashFunc);
//assert(hashSeq.size() == hashSeqCopy.size());
//for (size_t i = 0; i < hashSeq.size(); ++i) {
// assert(hashSeq[i] == hashSeqCopy[i]);
//}
//for (size_t i = 0; i < hashSeq.size(); ++i) {
// std:: cout << i << ": " << hashSeq[i] << " " << std:: endl;
//}
(*pListener).attachSeq(&seq);
(*pListener).setUnitLength(unitLength);
if (seq.size() < unitLength) {
return;
}
std::vector<std:: vector<size_t/* pos */> > cloneFragments;
{
std:: vector<size_t> cloneCounts;
cloneCounts.resize((size_t)(std:: numeric_limits<HashValueType>::max()) + 1, 0);
size_t pos = 1;
while (pos < seq.size() - unitLength) {
HashValueType h = hashSeq[pos];
if (h != 0) {
assert(cloneCounts[h] < std::numeric_limits<size_t>::max());
++cloneCounts[h];
++pos;
}
else {
if (pos + unitLength < hashSeq.size()) {
if (pos > unitLength) {
assert(hashSeq[pos + unitLength - 1] == 0);
pos += unitLength;
}
while (pos < seq.size() - unitLength && hashSeq[pos] == 0) {
++pos;
}
}
else {
break; // while pos
}
}
}
cloneFragments.resize((size_t)(std:: numeric_limits<HashValueType>::max()) + 1);
pos = 1;
while (pos < seq.size() - unitLength) {
HashValueType h = hashSeq[pos];
if (h != 0) {
if (cloneCounts[h] >= 2) {
cloneFragments[h].reserve(cloneCounts[h]);
cloneFragments[h].push_back(pos);
}
++pos;
}
else {
if (pos + unitLength < hashSeq.size()) {
if (pos > unitLength) {
assert(hashSeq[pos + unitLength - 1] == 0);
pos += unitLength;
}
while (pos < seq.size() - unitLength && hashSeq[pos] == 0) {
++pos;
}
}
else {
break; // while pos
}
}
}
}
ThreadQueue<std::vector<std::vector<CloneSetData> > *> que(10);
boost::thread eater(boost::bind(&CloneDetector::send_clone_set_data_to_listener, this, &que, pListener));
size_t worker = std::max((size_t)1, (size_t)numThreads);
std::vector<size_t> validCis;
validCis.reserve(numThreads);
size_t ci = 1;
while (ci < cloneFragments.size()) {
validCis.clear();
while (ci < cloneFragments.size() && validCis.size() < worker) {
if (cloneFragments[ci].size() > 0) {
validCis.push_back(ci);
}
++ci;
}
std::vector<std::vector<CloneSetData> > *pFoundCloneSetsForThreads = new std::vector<std::vector<CloneSetData> >();
std::vector<std::vector<CloneSetData> > &foundCloneSetsForThreads = *pFoundCloneSetsForThreads;
foundCloneSetsForThreads.resize(worker);
size_t validCiCount = validCis.size();
#pragma omp parallel for schedule(dynamic)
for (size_t cii = 0; cii < validCiCount; ++cii) {
const size_t threadNum = cii;
size_t tci = validCis[cii];
std::vector<CloneSetData> &foundCloneSets = foundCloneSetsForThreads[threadNum];
foundCloneSets.clear();
std:: vector<size_t/* pos */> &poss = cloneFragments[tci];
if (poss.size() > 1) {
typename SubSequence::SequencePrevComparator spc(unitLength, pSeq);
std:: sort(poss.begin(), poss.end(), spc);
size_t j = 0;
while (j < poss.size()) {
size_t pj = poss[j];
SubSequence ssj(pj, pj + unitLength);
SubSequence ssk;
size_t k = j + 1;
while (k < poss.size() && subsequenceEqual(pSeq, (ssk = SubSequence(poss[k], poss[k] + unitLength)), ssj)) {
++k;
}
// here, subsequence beginning at j, ..., subsequence beginning at k - 1 have the same subsequence
assert(k == poss.size() || ! subsequenceEqual(pSeq, ssj, ssk));
size_t size = k - j;
if (size <= 1) {
// nothing to do: singleton group
}
else {
const ElemType &firstPrev = to_reversereference_compared(*pSeq, poss[j] - 1, poss[j], poss[j] + unitLength);
const ElemType &lastPrev = to_reversereference_compared(*pSeq, poss[k - 1] - 1, poss[k - 1], poss[k - 1] + unitLength);
if (firstPrev != 0 && firstPrev != -1 && firstPrev == lastPrev) { // 2007/10/29 //if (firstPrev != 0 && firstPrev == lastPrev) {
// skip: every occurrence shares the same non-delimiter prev, so a longer clone covers this group
}
else {
size_t maxExtend = calc_max_extend(poss, j, k, unitLength);
typename SubSequence::PrevExtensionComparator pec(unitLength + maxExtend, pSeq);
std:: sort(poss.begin() + j, poss.begin() + k, pec);
output_clone_set(poss, j, k, unitLength + maxExtend, pListener, &foundCloneSets);
find_clone_set_i(&poss, j, k, unitLength + maxExtend, pListener, &foundCloneSets);
}
}
j = k;
}
}
}
que.push(pFoundCloneSetsForThreads);
}
que.push(NULL);
eater.join();
hashSeq.clear();
}
private:
void find_clone_set_i(std:: vector<size_t/* pos */> *pPoss, size_t begin, size_t end,
size_t baseLength, CloneSetListener *pListener, std::vector<CloneSetData> *pFoundCloneSets)
{
if (end - begin <= 1) {
return;
}
std:: vector<size_t/* pos */> &poss = *pPoss;
typename SubSequence::ExtensionPrevComparator epc(baseLength, pSeq);
std:: sort(poss.begin() + begin, poss.begin() + end, epc);
size_t nnBegin = begin;
while (nnBegin < end && (poss[nnBegin] + baseLength >= (*pSeq).size() || (*pSeq)[poss[nnBegin] + baseLength] == 0)) {
++nnBegin;
}
begin = nnBegin;
if (end - nnBegin <= 1) {
return;
}
size_t j = nnBegin;
while (j < end) {
size_t k = j + 1;
while (k < end && to_compared(*pSeq, poss[k] + baseLength, poss[k]) == to_compared(*pSeq, poss[j] + baseLength, poss[j])) {
++k;
}
//std::cerr << to_compared(*pSeq, poss[j] + baseLength, poss[j]) << ": "; for (size_t I = j; I < k; ++I) { std::cerr << poss[I] << " "; } std::cerr << std::endl;
// here, subsequence beginning at j, ..., subsequence beginning at k - 1 have the same subsequence
assert(k == end || ! (to_compared(*pSeq, poss[k] + baseLength, poss[k]) == to_compared(*pSeq, poss[j] + baseLength, poss[j])));
size_t size = k - j;
if (size <= 1) {
// nothing to do: singleton group
}
else {
size_t pj = poss[j];
const ElemType &firstPrev = to_reversereference_compared(*pSeq, pj - 1, pj, pj + baseLength);
const ElemType &lastPrev = to_reversereference_compared(*pSeq, poss[k - 1] - 1, poss[k - 1], poss[k - 1] + baseLength);
if (firstPrev != 0 && firstPrev != -1 && firstPrev == lastPrev) { // 2007/11/02 //if (firstPrev != 0 && firstPrev == lastPrev) {
// skip: every occurrence shares the same non-delimiter prev, so a longer clone covers this group
}
else {
size_t maxExtend = calc_max_extend(poss, j, k, baseLength);
typename SubSequence::PrevExtensionComparator pec(baseLength + maxExtend, pSeq);
std:: sort(poss.begin() + j, poss.begin() + k, pec);
output_clone_set(poss, j, k, baseLength + maxExtend, pListener, pFoundCloneSets);
find_clone_set_i(&poss, j, k, baseLength + maxExtend, pListener, pFoundCloneSets);
}
}
j = k;
}
}
void output_clone_set(const std:: vector<size_t/* pos */> &poss, size_t begin, size_t end, size_t baseLength, CloneSetListener *pListener,
std::vector<CloneSetData> *pFoundCloneSets)
{
if (end - begin == 0 || ! (*pListener).codeCheck(poss[begin], baseLength)) {
return;
}
std:: vector<CloneSetItem> cloneSet;
size_t p = begin;
while (p < end) {
const ElemType &prevp = to_reversereference_compared(*pSeq, poss[p] - 1, poss[p], poss[p] + baseLength);
size_t q = p + 1;
while (q < end && to_reversereference_compared(*pSeq, poss[q] - 1, poss[q], poss[q] + baseLength) == prevp) {
++q;
}
// here, subsequence beginning at p, ..., subsequence beginning at q - 1 have the same prev
assert(q == end || prevp != to_reversereference_compared(*pSeq, poss[q] - 1, poss[q], poss[q] + baseLength));
size_t i = p;
while (i < q) {
const ElemType &extensioni = to_compared(*pSeq, poss[i] + baseLength, poss[i]);
size_t j = i + 1;
while (j < q && to_compared(*pSeq, poss[j] + baseLength, poss[j]) == extensioni) {
++j;
}
// here, subsequence beginning at i, ..., subsequence beginning at j - 1 have the same extension
assert(j == q || extensioni != to_compared(*pSeq, poss[j] + baseLength, poss[j]));
cloneSet.resize(cloneSet.size() + 1);
CloneSetItem &cs = cloneSet.back();
cs.prev = prevp;
cs.extension = extensioni;
cs.poss.insert(cs.poss.end(), poss.begin() + i, poss.begin() + j);
i = j;
}
p = q;
}
if ((*pListener).rangeCheck(cloneSet)) {
std::vector<CloneSetData> &foundCloneSets = *pFoundCloneSets;
foundCloneSets.resize(foundCloneSets.size() + 1);
CloneSetData &cloneSetData = foundCloneSets.back();
cloneSetData.cloneSet.swap(cloneSet);
cloneSetData.baseLength = baseLength;
}
}
size_t calc_max_extend(const std:: vector<size_t/* pos */> &poss, size_t begin, size_t end, size_t baseLength)
{
assert(end - begin >= 2);
size_t extend = 0;
while (true) {
size_t posj = poss[begin];
const ElemType &ej = to_compared(*pSeq, posj + baseLength + extend, posj);
if (ej == 0) {
return extend;
}
for (size_t p = begin + 1; p < end; ++p) {
size_t pos = poss[p];
const ElemType &ep = to_compared(*pSeq, pos + baseLength + extend, pos);
if (ep != ej) {
return extend;
}
}
++extend;
}
return extend;
}
void calc_hash_seq(SequenceHashFunction &hashFunc)
{
const std::vector<ElemType> &seq = *pSeq;
hashSeq.clear();
hashSeq.resize(seq.size(), 0);
size_t num = bottomUnitLength * multiply;
std:: vector<size_t> factors0;
factorize(&factors0, num);
if (factors0.size() == 0) {
make_bottom_level_hash_sequence(seq, hashFunc, &hashSeq, bottomUnitLength * multiply);
}
else {
size_t beginPos = 0;
assert(seq.size() == 0 || seq.back() == 0);
while (beginPos + 1 < seq.size()) { // avoids size_t underflow when seq is empty
typename std::vector<ElemType>::const_iterator j = std::find(seq.begin() + beginPos + 1, seq.end(), 0);
size_t nextPos = j - seq.begin();
assert(seq[nextPos] == 0);
size_t endPos = nextPos + 1;
assert(endPos <= seq.size());
size_t blockSize = endPos - beginPos;
if (blockSize < num) {
// hashSeq[beginPos ... endPos] has been zero-filled already.
}
else {
int fi = factors0.size() - 1;
size_t f = factors0[fi];
make_bottom_level_hash_sequence(seq, hashFunc, &hashSeq, f, beginPos, endPos);
size_t curUnitLength = f;
while (--fi >= 0) {
size_t f = factors0[fi];
multiple_hash_sequence(hashFunc, &hashSeq, curUnitLength, f, beginPos, endPos);
curUnitLength *= f;
}
assert(curUnitLength == bottomUnitLength * multiply);
}
beginPos = nextPos;
}
}
}
private:
static inline void multiple_hash_sequence(SequenceHashFunction &hashFunc,
std:: vector<HashValueType> *pHashSeq, size_t unitLength, size_t multiply)
{
std:: vector<HashValueType> &hashSeq = *pHashSeq;
if (! hashSeq.empty()) {
multiple_hash_sequence(hashFunc, pHashSeq, unitLength, multiply, 0, hashSeq.size());
}
}
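// Combine existing unit-length hashes into hashes of windows of length
// unitLength * multiply by summing the 'multiply' sub-hashes spaced
// unitLength apart; calc_hash_seq applies this repeatedly over the
// factorization of the target window length.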
static void multiple_hash_sequence(
SequenceHashFunction& UNUSED(hashFunc),
std:: vector<HashValueType> *pHashSeq,
size_t unitLength,
size_t multiply,
size_t beginPos,
size_t endPos)
{
std:: vector<HashValueType> &hashSeq = *pHashSeq;
assert(beginPos < hashSeq.size());
assert(hashSeq[beginPos] == 0);
assert(endPos <= hashSeq.size());
assert(hashSeq[endPos - 1] == 0);
assert(unitLength >= 1);
assert(endPos > unitLength * multiply);
for (size_t i = beginPos + 1; i < endPos - unitLength * multiply; ++i) {
HashValueType value = 0;
for (size_t j = 0; j < multiply; ++j) {
HashValueType h = hashSeq[i + j * unitLength];
/*
** when i == endPos - unitLength * multiply - 1 and j == multiply - 1,
** i + j * unitLength
** = endPos - unitLength * multiply - 1 + unitLength * (multiply - 1)
** = endPos - unitLength * multiply - 1 + unitLength * multiply - unitLength
** = endPos - unitLength - 1
** Therefore, i + j * unitLength < endPos - 1
*/
assert(h != 0);
value += h;
}
hashSeq[i] = value == 0 ? 1 : value; // 0 is reserved as the delimiter, so it cannot be used as a hash value
}
std::fill(hashSeq.begin() + endPos - unitLength * multiply, hashSeq.begin() + endPos, 0);
}
static inline void make_bottom_level_hash_sequence(const std:: vector<ElemType> &seq, SequenceHashFunction &hashFunc,
std:: vector<HashValueType> *pHashSeq, size_t unitLength)
{
std:: vector<HashValueType> &hashSeq = *pHashSeq;
if (! hashSeq.empty()) {
make_bottom_level_hash_sequence(seq, hashFunc, pHashSeq, unitLength, 0, seq.size());
}
}
static void make_bottom_level_hash_sequence(const std:: vector<ElemType> &seq, SequenceHashFunction &hashFunc,
std:: vector<HashValueType> *pHashSeq, size_t unitLength, size_t beginPos, size_t endPos)
{
assert(beginPos < seq.size());
assert(seq[beginPos] == 0);
assert(endPos <= seq.size());
assert(seq[endPos - 1] == 0);
assert(unitLength >= 1);
std:: vector<HashValueType> &hashSeq = *pHashSeq;
assert(hashSeq.size() == seq.size());
size_t i = beginPos + 1;
if (endPos - beginPos >= unitLength) {
typename std::vector<ElemType>::const_iterator range_begin = seq.begin() + beginPos + 1;
typename std::vector<ElemType>::const_iterator range_end = seq.begin() + endPos - unitLength; // value of i at the last repetition of the following 'for' loop
//assert(std::find(range_begin, range_end, 0) == range_end);
for (; i < endPos - unitLength; ++i) {
HashValueType hashValue = hashFunc(seq, i, i + unitLength);
hashSeq[i] = hashValue == 0 ? 1 : hashValue; // 0 is reserved as the delimiter, so it cannot be used as a hash value
}
}
std::fill(hashSeq.begin() + i, hashSeq.begin() + endPos, 0);
}
static void factorize(std:: vector<size_t> *pFactors, size_t number0)
{
std:: vector<size_t> &factors = *pFactors;
factors.clear();
size_t number = number0;
while (number > 1) {
bool found = false;
for (size_t i = 2; i <= number / 2; ++i) { // include number / 2 so that e.g. 4 factors as {2, 2}
if (number % i == 0) {
factors.push_back(i);
number /= i;
found = true;
break; // for i
}
}
if (! found) {
factors.push_back(number);
return;
}
}
}
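// Example (illustrative): factorize(&f, 12) yields {2, 2, 3}, and
// factorize(&f, 7) yields {7}. calc_hash_seq consumes the factors from
// the back, so the hashed window lengths grow as 3, 3*2 = 6, 6*2 = 12.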
//static void fill_zero(const std:: vector<ElemType> &seq, std:: vector<HashValueType> *pHashSeq, size_t unitLength)
//{
// assert(seq.size() == (*pHashSeq).size());
//
// if ((*pHashSeq).size() == 0) {
// return;
// }
// size_t i = (*pHashSeq).size() - 1;
// while (true) {
// if (seq[i] == 0) {
// for (size_t j = 0; j < unitLength; ++j) {
// (*pHashSeq)[i] = 0;
// if (--i == 0) {
// return;
// }
// }
// }
// if (--i == 0) {
// return;
// }
// }
//}
};
#endif // CLONEDETECTOR_H
|
trmm_x_dia_n_lo_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT cc = 0; cc < columns; ++cc)
{
ALPHA_Number* Y = &y[index2(cc,0,ldy)];
for (ALPHA_INT i = 0; i < mat->rows; i++)
alpha_mul(Y[i],Y[i],beta);
const ALPHA_Number* X = &x[index2(cc,0,ldx)];
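/* DIA storage (as used here): distance[di] is the offset of the di-th
stored diagonal (negative offsets lie below the main diagonal), and
values keeps each diagonal as a row of length lval indexed by the row
coordinate. The d <= 0 filter keeps only the lower triangle. */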
for(ALPHA_INT di = 0; di < mat->ndiag;++di){
ALPHA_INT d = mat->distance[di];
if(d <= 0){
ALPHA_INT ars = alpha_max(0,-d);
ALPHA_INT acs = alpha_max(0,d);
ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
for(ALPHA_INT i = 0; i < an; ++i){
ALPHA_INT ar = ars + i;
ALPHA_INT ac = acs + i;
ALPHA_Number val;
alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha);
alpha_madde(Y[ar],val,X[ac]);
}
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
vsum.c | //
// vsum.c : Demo of multi-target multi-source OpenMP offload
//
#include <stdio.h>
void vsum(int*a, int*b, int*c, int N){
#pragma omp target teams map(to: a[0:N],b[0:N]) map(from:c[0:N])
#pragma omp distribute parallel for
for(int i=0;i<N;i++) {
c[i] = a[i] + b[i]; // b[i*2] would read past the mapped region b[0:N] for i >= N/2
}
}
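/*
A minimal usage sketch (an assumption, not part of the original demo):
allocate host vectors, offload the sum, and spot-check one element. The
size N and the fill values are illustrative only.
*/
#include <stdlib.h>
int main(void) {
int N = 1024;
int *a = malloc(N * sizeof(int));
int *b = malloc(N * sizeof(int));
int *c = malloc(N * sizeof(int));
for (int i = 0; i < N; i++) { a[i] = i; b[i] = 2 * i; }
vsum(a, b, c, N);
printf("c[10] = %d (expected 30)\n", c[10]); /* 10 + 2*10 */
free(a); free(b); free(c);
return 0;
}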
|
taskbench.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "common.h"
#include "taskbench.h"
#define DEPTH 6
int main(int argc, char **argv) {
init(argc, argv);
#ifdef OMPVER3
/* GENERATE REFERENCE TIME */
reference("reference time 1", &refer);
/* TEST PARALLEL TASK GENERATION */
benchmark("PARALLEL TASK", &testParallelTaskGeneration);
/* TEST MASTER TASK GENERATION */
benchmark("MASTER TASK", &testMasterTaskGeneration);
/* TEST MASTER TASK GENERATION WITH BUSY SLAVES */
benchmark("MASTER TASK BUSY SLAVES", &testMasterTaskGenerationWithBusySlaves);
/* TEST CONDITIONAL TASK GENERATION */
#ifndef DISABLE_CONDITIONAL_TASK_TEST
benchmark("CONDITIONAL TASK", &testConditionalTaskGeneration);
#endif // DISABLE_CONDITIONAL_TASK_TEST
/* TEST TASK WAIT */
benchmark("TASK WAIT", &testTaskWait);
/* TEST TASK BARRIER */
#ifndef DISABLE_BARRIER_TEST
benchmark("TASK BARRIER", &testTaskBarrier);
#endif //DISABLE_BARRIER_TEST
#ifndef DISABLE_NESTED_TASKS_TESTS
/* TEST NESTED TASK GENERATION */
benchmark("NESTED TASK", &testNestedTaskGeneration);
/* TEST NESTED MASTER TASK GENERATION */
benchmark("NESTED MASTER TASK", &testNestedMasterTaskGeneration);
#endif // DISABLE_NESTED_TASKS_TESTS
/* GENERATE THE SECOND REFERENCE TIME */
reference("reference time 2", &refer);
/* TEST BRANCH TASK TREE */
benchmark("BRANCH TASK TREE", &testBranchTaskGeneration);
/* TEST LEAF TASK TREE */
benchmark("LEAF TASK TREE", &testLeafTaskGeneration);
#endif // OMPVER3
finalise();
return EXIT_SUCCESS;
}
/* Calculate the reference time. */
void refer() {
int j;
for (j = 0; j < innerreps; j++) {
delay(delaylength);
}
}
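/* The task-tree benchmarks run (innerreps >> DEPTH) outer iterations, each
performing (1 << DEPTH) delay() calls in total, hence the iteration count
used below for the second reference time. */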
/* Calculate the second reference time. */
void refer2() {
int j;
for (j = 0; j < (innerreps >> DEPTH) * (1 << DEPTH); j++) {
delay(delaylength);
};
}
/* Test parallel task generation overhead */
void testParallelTaskGeneration() {
int j;
#pragma omp parallel private( j )
{
for ( j = 0; j < innerreps; j ++ ) {
#pragma omp task
{
delay( delaylength );
} // task
}; // for j
} // parallel
}
/* Test master task generation overhead */
void testMasterTaskGeneration() {
int j;
#pragma omp parallel private(j)
{
#pragma omp master
{
/* Since this is executed by one thread we need innerreps * nthreads
iterations */
for (j = 0; j < innerreps * nthreads; j++) {
#pragma omp task
{
delay(delaylength);
}
} /* End for j */
} /* End master */
} /* End parallel */
}
/* Test master task generation overhead when the slave threads are busy */
void testMasterTaskGenerationWithBusySlaves() {
int j;
#pragma omp parallel private( j )
{
int thread_num = omp_get_thread_num();
for (j = 0; j < innerreps; j ++ ) {
if ( thread_num == 0 ) {
#pragma omp task
{
delay( delaylength );
} // task
} else {
delay( delaylength );
}; // if
}; // for j
} // parallel
}
/* Measure overhead of checking if a task should be spawned. */
void testConditionalTaskGeneration() {
int j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp task if(returnfalse())
{
delay( delaylength );
}
}
}
}
#ifndef DISABLE_NESTED_TASKS_TESTS
/* Measure overhead of nested tasks (all threads construct outer tasks) */
void testNestedTaskGeneration() {
int i,j;
#pragma omp parallel private( i, j )
{
for ( j = 0; j < innerreps / nthreads; j ++ ) {
#pragma omp task private( i )
{
for ( i = 0; i < nthreads; i ++ ) {
#pragma omp task untied
{
delay( delaylength );
} // task
}; // for i
// wait for inner tasks to complete
#pragma omp taskwait
} // task
}; // for j
} // parallel
}
/* Measure overhead of nested tasks (master thread constructs outer tasks) */
void testNestedMasterTaskGeneration() {
int i, j;
#pragma omp parallel private( i, j )
{
#pragma omp master
{
for ( j = 0; j < innerreps; j ++ ) {
#pragma omp task private( i )
{
for ( i = 0; i < nthreads; i ++ ) {
#pragma omp task
{
delay( delaylength );
} // task
}; // for i
// wait for inner tasks to complete
#pragma omp taskwait
} // task
}; // for j
} // master
} // parallel
}
#endif // DISABLE_NESTED_TASKS_TESTS
/* Measure overhead of taskwait (all threads construct tasks) */
void testTaskWait() {
int j;
#pragma omp parallel private( j )
{
for ( j = 0; j < innerreps; j ++ ) {
#pragma omp task
{
delay( delaylength );
} // task
#pragma omp taskwait
}; // for j
} // parallel
}
/* Measure overhead of tasking barrier (all threads construct tasks) */
void testTaskBarrier() {
int j;
#pragma omp parallel private( j )
{
for ( j = 0; j < innerreps; j ++ ) {
#pragma omp task
{
delay( delaylength );
} // task
#pragma omp barrier
}; // for j
} // parallel
}
/* Test parallel task generation overhead where work is done at all levels. */
void testBranchTaskGeneration() {
int j;
#pragma omp parallel private(j)
{
for (j = 0; j < (innerreps >> DEPTH); j++) {
#pragma omp task
{
branchTaskTree(DEPTH);
delay(delaylength);
}
}
}
}
void branchTaskTree(int tree_level) {
if ( tree_level > 0 ) {
#pragma omp task
{
branchTaskTree(tree_level - 1);
branchTaskTree(tree_level - 1);
delay(delaylength);
}
}
}
/* Test parallel task generation overhead where work is done only at the leaf level. */
void testLeafTaskGeneration() {
int j;
#pragma omp parallel private(j)
{
for (j = 0; j < (innerreps >> DEPTH); j++) {
leafTaskTree(DEPTH);
}
}
}
void leafTaskTree(int tree_level) {
if ( tree_level == 0 ) {
delay(delaylength);
} else {
#pragma omp task
{
leafTaskTree(tree_level - 1);
leafTaskTree(tree_level - 1);
}
}
}
|
exch.c | #include "micro_clib.h"
void compute_exch_field_micro(double *restrict m, double *restrict field, double *restrict energy,
double *restrict Ms_inv, double A, double dx, double dy, double dz,
int n, int *restrict ngbs) {
/* Compute the micromagnetic exchange field and energy using the
* matrix of neighbouring spins and a second order approximation
* for the derivative
*
* Ms_inv :: Array with the (1 / Ms) values for every mesh node.
* The values are zero for points with Ms = 0 (no material)
*
* A :: Exchange constant
*
* dx, dy, dz :: Mesh spacings in the corresponding directions
*
* n :: Number of mesh nodes
*
* ngbs :: The array of neighbouring spins, which has (6 * n)
* entries. Specifically, it contains the indexes of
* the neighbours of every mesh node, in the following order:
* -x, +x, -y, +y, -z, +z
*
* Thus, the array is like:
* | 0-x, 0+x, 0-y, 0+y, 0-z, 0+z, 1-x, 1+x, 1-y, ... |
* i=0 i=1 ...
*
* where 0-y is the index of the neighbour of the 0th spin,
* in the -y direction, for example. The index value for a
* neighbour where Ms = 0, is evaluated as -1. The array
* automatically gives periodic boundaries.
*
* A basic example is a 3 x 3 two dimensional mesh with PBCs
* in the X and Y direction:
*
* +-----------+
* | 6 | 7 | 8 |
* +-----------+
* | 3 | 4 | 5 |
* +-----------+
* | 0 | 1 | 2 |
* +-----------+
*
* so, the first 6 entries (neighbours of the 0th mesh node)
* of the array would be: [ 2 1 6 3 -1 -1 ... ]
* (-1 since there is no material in +-z, and a '2' first,
* since it is the left neighbour which is the PBC in x, etc..)
*
* For the exchange computation, the field is defined as:
* H_ex = (2 * A / (mu0 * Ms)) * nabla^2 (mx, my, mz)
*
* Therefore, for the i-th mesh node (spin), we approximate the
* derivatives as:
* nabla^2 mx = (1 / dx^2) * ( m[i-x] - 2 * m[i] + m[i+x] ) +
* (1 / dy^2) * ( m[i-y] - 2 * m[i] + m[i+y] ) +
* (1 / dz^2) * ( m[i-z] - 2 * m[i] + m[i+z] )
*
* Where i-x is the neighbour in the -x direction. This is similar
* for my and mz.
* We can notice that the sum is the same if we do:
* ( m[i-x] - m[i] ) + ( m[i+x] - m[i] )
* so we can iterate through the neighbours and perform the sum with the
* corresponding coefficient 1 /dx, 1/dy or 1/dz
*
* The *m array contains the spins as:
* [mx0, my0, mz0, mx1, my1, mz1, mx2, ...]
* so if we want the starting position of the magnetisation for the
* i-th spin, we only have to do (3 * i) for mx, (3 * i + 1) for my
* and (3 * i + 2) for mz
*
*
* IMPORTANT: The ex field usually has the structure:
* 2 * A / (mu0 Ms ) * (Second derivative of M)
* When discretising the derivative, it carries a "2" in the
* denominator which we "cancel" with the "2" in the prefactor,
* hence we do not put it explicitly in the calculations
*
* So, when computing the energy: (-1/2) * mu * Ms * H_ex
* we only put the 0.5 factor and don't worry about the "2"s in the
* field
*
*/
/* Define the coefficients */
double ax = 2 * A / (dx * dx);
double ay = 2 * A / (dy * dy);
double az = 2 * A / (dz * dz);
/* Here we iterate through every mesh node */
#pragma omp parallel for
for (int i = 0; i < n; i++) {
double fx = 0, fy = 0, fz = 0;
int idnm = 0; // Index for the magnetisation matrix
int idn = 6 * i; // index for the neighbours
/* Set a zero field for sites without magnetic material */
if (Ms_inv[i] == 0.0){
field[3 * i] = 0;
field[3 * i + 1] = 0;
field[3 * i + 2] = 0;
continue;
}
/* Here we iterate through the neighbours */
for (int j = 0; j < 6; j++) {
/* Remember that index=-1 is for sites without material */
if (ngbs[idn + j] >= 0) {
/* Magnetisation of the neighbouring spin since ngbs gives
* the neighbour's index */
idnm = 3 * ngbs[idn + j];
/* Check that the magnetisation of the neighbouring spin
* is larger than zero */
if (Ms_inv[ngbs[idn + j]] > 0){
/* Neighbours in the -x and +x directions
* giving: ( m[i-x] - m[i] ) + ( m[i+x] - m[i] )
* when ngbs[idn + j] > 0 for j = 0 and j=1
* If, for example, there is no
* neighbour at -x (j=0) in the 0th node (no PBCs),
* the second derivative would only be evaluated as:
* (1 / dx * dx) * ( m[i+x] - m[i] )
* which, according to
* [M.J. Donahue and D.G. Porter; Physica B, 343, 177-183 (2004)]
* when performing the integration of the energy, we still
* have error of the order O(dx^2)
* The same applies to the other directions
*/
if (j == 0 || j == 1) {
fx += ax * (m[idnm] - m[3 * i]);
fy += ax * (m[idnm + 1] - m[3 * i + 1]);
fz += ax * (m[idnm + 2] - m[3 * i + 2]);
}
/* Neighbours in the -y and +y directions */
else if (j == 2 || j == 3) {
fx += ay * (m[idnm] - m[3 * i]);
fy += ay * (m[idnm + 1] - m[3 * i + 1]);
fz += ay * (m[idnm + 2] - m[3 * i + 2]);
}
/* Neighbours in the -z and +z directions */
else if (j == 4 || j == 5) {
fx += az * (m[idnm] - m[3 * i]);
fy += az * (m[idnm + 1] - m[3 * i + 1]);
fz += az * (m[idnm + 2] - m[3 * i + 2]);
}
else {
continue;
}
}
}
}
/* Energy as: (-mu0 * Ms / 2) * [ H_ex * m ] */
energy[i] = -0.5 * (fx * m[3 * i] + fy * m[3 * i + 1]
+ fz * m[3 * i + 2]);
/* Update the field H_ex, which has the same structure as *m */
field[3 * i] = fx * Ms_inv[i] * MU0_INV;
field[3 * i + 1] = fy * Ms_inv[i] * MU0_INV;
field[3 * i + 2] = fz * Ms_inv[i] * MU0_INV;
}
}
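/* A minimal sketch (not part of the original code) showing how the ngbs
array described above could be built for a single nx x ny layer with PBCs
in x and y and no material in +-z, matching the 3 x 3 example. */
static void build_ngbs_2d_pbc_example(int nx, int ny, int *restrict ngbs) {
for (int j = 0; j < ny; j++) {
for (int i = 0; i < nx; i++) {
int idx = 6 * (j * nx + i);
ngbs[idx] = j * nx + (i - 1 + nx) % nx; /* -x neighbour (wraps) */
ngbs[idx + 1] = j * nx + (i + 1) % nx; /* +x neighbour (wraps) */
ngbs[idx + 2] = ((j - 1 + ny) % ny) * nx + i; /* -y neighbour (wraps) */
ngbs[idx + 3] = ((j + 1) % ny) * nx + i; /* +y neighbour (wraps) */
ngbs[idx + 4] = -1; /* -z: no material */
ngbs[idx + 5] = -1; /* +z: no material */
}
}
}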
inline int get_index(int nx, int ny, int i, int j, int k){
return k * nx*ny + j * nx + i;
}
void compute_exch_field_rkky_micro(double *m, double *field, double *energy, double *Ms_inv,
double sigma, int nx, int ny, int nz, int z_bottom, int z_top){
/* Compute the RKKY interlayer exchange field and energy, which couples
* the magnetisation of the mesh layers at z_bottom and z_top
*
* Ms_inv :: Array with the (1 / Ms) values for every mesh node.
* The values are zero for points with Ms = 0 (no material)
*
* sigma :: Exchange constant
*
* nx, ny, nz :: Mesh dimensions.
* The exchange field at the top (bottom) layer can be computed as:
*
* H_top = (sigma / (mu0 * Ms)) * m_bottom
* H_bottom = (sigma / (mu0 * Ms)) * m_top
*
* The *m array contains the spins as:
* [mx0, my0, mz0, mx1, my1, mz1, mx2, ...]
* so if we want the starting position of the magnetisation for the
* i-th spin, we only have to do (3 * i) for mx, (3 * i + 1) for my
* and (3 * i + 2) for mz
*
*
*
*/
int n = nx*ny*nz;
for (int i = 0; i < n; i++){
energy[i] = 0;
field[3*i]=0;
field[3*i+1]=0;
field[3*i+2]=0;
}
#pragma omp parallel for
for (int i = 0; i < nx; i++) {
for (int j = 0; j < ny; j++){
double mtx=0, mty=0, mtz=0;
double mbx=0, mby=0, mbz=0;
int id1 = get_index(nx,ny, i, j, z_bottom);
int id2 = get_index(nx,ny, i, j, z_top);
mtx = m[3*id2];
mty = m[3*id2+1];
mtz = m[3*id2+2];
mbx = m[3*id1];
mby = m[3*id1+1];
mbz = m[3*id1+2];
if (Ms_inv[id1] != 0.0){
energy[id1] = sigma*(1-mtx*mbx-mty*mby-mtz*mbz);
field[3*id1] = sigma * mtx * Ms_inv[id1] * MU0_INV;
field[3*id1+1] = sigma * mty * Ms_inv[id1] * MU0_INV;
field[3*id1+2] = sigma * mtz * Ms_inv[id1] * MU0_INV;
}
if (Ms_inv[id2] != 0.0){
energy[id2] = sigma*(1-mtx*mbx-mty*mby-mtz*mbz);
field[3*id2] = sigma * mbx * Ms_inv[id2] * MU0_INV;
field[3*id2+1] = sigma * mby * Ms_inv[id2] * MU0_INV;
field[3*id2+2] = sigma * mbz * Ms_inv[id2] * MU0_INV;
}
}
}
}
|
sample-2.c | #include <stdio.h>
#include <omp.h>
int main() {
int id,x;
omp_set_num_threads(100);
#pragma omp parallel private(id,x)
{
// int id,x;
id=omp_get_thread_num();
x=10*id;
printf("\n");
printf("Hello from thread %d, x= %d", id,x);
printf("\n");
}
return 0;
} |
data.h | /*!
* Copyright (c) 2015 by Contributors
* Modifications Copyright (c) 2020 by Secure XGBoost Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4,
kStr = 5
};
enum class FeatureType : uint8_t {
kNumerical
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of data fields in MetaInfo */
static constexpr uint64_t kNumField = 11;
/*! \brief number of rows in the data */
uint64_t num_row_{0}; // NOLINT
/*! \brief number of columns in the data */
uint64_t num_col_{0}; // NOLINT
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0}; // NOLINT
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_; // NOLINT
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_group_t> group_ptr_; // NOLINT
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_; // NOLINT
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_; // NOLINT
/*!
* \brief lower bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT
/*!
* \brief upper bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT
/*!
* \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q"
*/
std::vector<std::string> feature_type_names;
/*!
* \brief Name for each feature.
*/
std::vector<std::string> feature_names;
/*
* \brief Type of each feature. Automatically set when feature_type_names is specified.
*/
HostDeviceVector<FeatureType> feature_types;
/*! \brief default constructor */
MetaInfo() = default;
MetaInfo(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo const& that) {
this->num_row_ = that.num_row_;
this->num_col_ = that.num_col_;
this->num_nonzero_ = that.num_nonzero_;
this->labels_.Resize(that.labels_.Size());
this->labels_.Copy(that.labels_);
this->group_ptr_ = that.group_ptr_;
this->weights_.Resize(that.weights_.Size());
this->weights_.Copy(that.weights_);
this->base_margin_.Resize(that.base_margin_.Size());
this->base_margin_.Copy(that.base_margin_);
this->labels_lower_bound_.Resize(that.labels_lower_bound_.Size());
this->labels_lower_bound_.Copy(that.labels_lower_bound_);
this->labels_upper_bound_.Resize(that.labels_upper_bound_.Size());
this->labels_upper_bound_.Copy(that.labels_upper_bound_);
return *this;
}
/*!
* \brief Validate all metainfo.
*/
void Validate(int32_t device) const;
MetaInfo Slice(common::Span<int32_t const> ridxs) const;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*
* [ column_0, column_1, ... column_n ]
*
* Right now only 1 column is permitted.
*/
void SetInfo(const char* key, std::string const& interface_str);
void GetInfo(char const* key, bst_ulong* out_len, DataType dtype,
const void** out_dptr) const;
void SetFeatureInfo(const char *key, const char **info, const bst_ulong size);
void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const;
/*
* \brief Extend with other MetaInfo.
*
* \param that The other MetaInfo object.
*
* \param accumulate_rows Whether rows need to be accumulated in this function. If
* client code knows number of rows in advance, set this parameter to false.
*/
void Extend(MetaInfo const& that, bool accumulate_rows);
private:
/*! \brief argsort of labels */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief Parameters for constructing batches.
*/
struct BatchParam {
/*! \brief The GPU device to use. */
int gpu_id;
/*! \brief Maximum number of bins per feature for histograms. */
int max_bin{0};
/*! \brief Page size for external memory mode. */
size_t gpu_page_size;
BatchParam() = default;
BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
: gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
inline bool operator!=(const BatchParam& other) const {
return gpu_id != other.gpu_id || max_bin != other.max_bin ||
gpu_page_size != other.gpu_page_size;
}
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
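// Illustrative example (not from the original source): the 2x3 matrix
//   [[1, 0, 2],
//    [0, 3, 0]]
// is stored as offset = {0, 2, 3} and
// data = {Entry(0, 1.0f), Entry(2, 2.0f), Entry(1, 3.0f)},
// i.e. offset[i]..offset[i+1] delimits the (index, fvalue) entries of row i.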
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<bst_row_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
size_t base_rowid{};
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return Number of instances in the page. */
inline size_t Size() const {
return offset.Size() == 0 ? 0 : offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
/*! \brief Set the base row id for this page. */
inline void SetBaseRowId(size_t row_id) {
base_rowid = row_id;
}
SparsePage GetTranspose(int num_columns) const;
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
/**
* \brief Pushes external data batch onto this page
*
* \tparam AdapterBatchT
* \param batch
* \param missing
* \param nthread
*
* \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
*/
#ifdef __ENCLAVE__ // Required for handling multiple files correctly
template <typename AdapterBatchT>
uint64_t Push(const AdapterBatchT& batch, float missing, int nthread, size_t line_offset=0);
#else
template <typename AdapterBatchT>
uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
#endif
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
};
class CSCPage: public SparsePage {
public:
CSCPage() : SparsePage() {}
explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class SortedCSCPage : public SparsePage {
public:
SortedCSCPage() : SparsePage() {}
explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
public:
/*!
* \brief Default constructor.
*
* This is used in the external memory case. An empty ELLPACK page is constructed with its content
* set later by the reader.
*/
EllpackPage();
/*!
* \brief Constructor from an existing DMatrix.
*
* This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
* in CSR format.
*/
explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
/*! \brief Destructor. */
~EllpackPage();
EllpackPage(EllpackPage&& that);
/*! \return Number of instances in the page. */
size_t Size() const;
/*! \brief Set the base row id for this page. */
void SetBaseRowId(size_t row_id);
const EllpackPageImpl* Impl() const { return impl_.get(); }
EllpackPageImpl* Impl() { return impl_.get(); }
private:
std::unique_ptr<EllpackPageImpl> impl_;
};
template<typename T>
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() = default;
virtual T& operator*() = 0;
virtual const T& operator*() const = 0;
virtual void operator++() = 0;
virtual bool AtEnd() const = 0;
};
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag; // NOLINT
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
BatchIterator<T> begin() { return begin_iter_; } // NOLINT
BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT
private:
BatchIterator<T> begin_iter_;
};
struct XGBAPIThreadLocalEntry;
/*!
* \brief Internal data structured used by XGBoost during training.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
size_t num) {
this->Info().SetInfo(key, dptr, dtype, num);
}
virtual void SetInfo(const char* key, std::string const& interface_str) {
this->Info().SetInfo(key, interface_str);
}
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/*! \brief Get thread local memory for returning data from DMatrix. */
XGBAPIThreadLocalEntry& GetThreadLocal() const;
/**
* \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
*/
template<typename T>
BatchSet<T> GetBatches(const BatchParam& param = {});
template <typename T>
bool PageExists() const;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data columns are stored in a single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief virtual destructor */
virtual ~DMatrix();
/*! \brief Whether the matrix is dense. */
bool IsDense() const {
return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
}
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load in both local binary file.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
#ifdef __ENCLAVE__ // pass decryption key
bool is_encrypted,
char* key,
#endif
const std::string& file_format = "auto",
size_t page_size = kPageSize);
static DMatrix* Load(const std::vector<std::string>& uris,
bool silent,
bool load_row_split,
bool is_encrypted,
char* keys[],
const std::string& file_format = "auto",
const size_t page_size = kPageSize);
/**
* \brief Creates a new DMatrix from an external data adapter.
*
* \tparam AdapterT Type of the adapter.
* \param [in,out] adapter View onto an external data.
* \param missing Values to count as missing.
* \param nthread Number of threads for construction.
* \param cache_prefix (Optional) The cache prefix for external memory.
* \param page_size (Optional) Size of the page.
*
* \return a Created DMatrix.
*/
template <typename AdapterT>
static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
template <typename AdapterT>
static DMatrix* Create(std::vector<AdapterT*> adapters, float missing, int nthread,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
/**
* \brief Create a new Quantile based DMatrix used for histogram based algorithm.
*
* \tparam DataIterHandle External iterator type, defined in C API.
* \tparam DMatrixHandle DMatrix handle, defined in C API.
* \tparam DataIterResetCallback Callback for reset, prototype defined in C API.
* \tparam XGDMatrixCallbackNext Callback for next, prototype defined in C API.
*
* \param iter External data iterator
* \param proxy A handle to ProxyDMatrix
* \param reset Callback for reset
* \param next Callback for next
* \param missing Value that should be treated as missing.
* \param nthread number of threads used for initialization.
* \param max_bin Maximum number of bins.
*
* \return A created quantile based DMatrix.
*/
template <typename DataIterHandle, typename DMatrixHandle,
typename DataIterResetCallback, typename XGDMatrixCallbackNext>
static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
DataIterResetCallback *reset,
XGDMatrixCallbackNext *next, float missing,
int nthread,
int max_bin);
virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0;
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
protected:
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
virtual bool EllpackExists() const = 0;
virtual bool SparsePageExists() const = 0;
};
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetSortedColumnBatches();
}
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
return GetEllpackBatches(param);
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
namespace serializer {
template <>
struct Handler<xgboost::Entry> {
inline static void Write(Stream* strm, const xgboost::Entry& data) {
strm->Write(data.index);
strm->Write(data.fvalue);
}
inline static bool Read(Stream* strm, xgboost::Entry* data) {
return strm->Read(&data->index) && strm->Read(&data->fvalue);
}
};
} // namespace serializer
} // namespace dmlc
#endif // XGBOOST_DATA_H_
|
update_monodomain.c | //
// Created by sachetto on 13/10/17.
//
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include "../alg/grid/grid.h"
#include "../config/update_monodomain_config.h"
#include "../utils/utils.h"
#include "../single_file_libraries/stb_ds.h"
#ifdef COMPILE_CUDA
#include "../gpu_utils/gpu_utils.h"
#endif
UPDATE_MONODOMAIN(update_monodomain_default) {
real_cpu alpha;
bool use_gpu = the_ode_solver->gpu;
real_cpu beta = the_solver->beta;
real_cpu cm = the_solver->cm;
real_cpu dt_pde = the_solver->dt;
int n_equations_cell_model = the_ode_solver->model_data.number_of_ode_equations;
real *sv = the_ode_solver->sv;
#ifdef COMPILE_CUDA
real *vms = NULL;
size_t mem_size = initial_number_of_cells * sizeof(real);
if(use_gpu) {
vms = (real *)malloc(mem_size);
check_cuda_errors(cudaMemcpy(vms, sv, mem_size, cudaMemcpyDeviceToHost));
}
#endif
int i;
#pragma omp parallel for private(alpha)
for(i = 0; i < num_active_cells; i++) {
alpha = ALPHA(beta, cm, dt_pde, active_cells[i]->discretization.x, active_cells[i]->discretization.y, active_cells[i]->discretization.z);
if(use_gpu) {
#ifdef COMPILE_CUDA
active_cells[i]->b = vms[active_cells[i]->sv_position] * alpha;
#endif
} else {
active_cells[i]->b = sv[active_cells[i]->sv_position * n_equations_cell_model] * alpha;
}
}
#ifdef COMPILE_CUDA
free(vms);
#endif
}
UPDATE_MONODOMAIN(update_monodomain_ddm)
{
real_cpu alpha;
bool use_gpu = the_ode_solver->gpu;
real_cpu beta = the_solver->beta;
real_cpu cm = the_solver->cm;
real_cpu dt_pde = the_solver->dt;
int n_equations_cell_model = the_ode_solver->model_data.number_of_ode_equations;
real *sv = the_ode_solver->sv;
#ifdef COMPILE_CUDA
real *vms = NULL;
size_t mem_size = initial_number_of_cells * sizeof(real);
if(use_gpu)
{
vms = (real *)malloc(mem_size);
check_cuda_errors(cudaMemcpy(vms, sv, mem_size, cudaMemcpyDeviceToHost));
}
#endif
int i;
#pragma omp parallel for private(alpha)
for(i = 0; i < num_active_cells; i++)
{
// 1) Calculate alpha for the diagonal element
alpha = ALPHA(beta, cm, dt_pde, active_cells[i]->discretization.x, active_cells[i]->discretization.y, active_cells[i]->discretization.z);
if(use_gpu)
{
#ifdef COMPILE_CUDA
active_cells[i]->b = vms[active_cells[i]->sv_position] * alpha;
#endif
}
else
{
active_cells[i]->b = sv[active_cells[i]->sv_position * n_equations_cell_model] * alpha;
}
// 2) Calculate kappas
// We need to capture the neighbours from the current volume
struct element *cell_elements = active_cells[i]->elements;
uint32_t max_elements = arrlen(cell_elements);
real_cpu dx = active_cells[i]->discretization.x;
real_cpu dy = active_cells[i]->discretization.y;
real_cpu dz = active_cells[i]->discretization.z;
real_cpu kappa_x = active_cells[i]->kappa.x;
real_cpu kappa_y = active_cells[i]->kappa.y;
real_cpu kappa_z = active_cells[i]->kappa.z;
for (int j = 1; j < max_elements; j++)
{
int k = cell_elements[j].column;
if (cell_elements[j].direction == 'n') // North cell
{
real_cpu multiplier = (dx * dy) / dz;
if(use_gpu)
{
#ifdef COMPILE_CUDA
active_cells[i]->b -= vms[active_cells[k]->sv_position] * multiplier * kappa_z / dt_pde;
active_cells[i]->b += vms[active_cells[i]->sv_position] * multiplier * kappa_z / dt_pde;
#endif
}
else
{
active_cells[i]->b -= sv[active_cells[k]->sv_position * n_equations_cell_model] * multiplier * kappa_z / dt_pde;
active_cells[i]->b += sv[active_cells[i]->sv_position * n_equations_cell_model] * multiplier * kappa_z / dt_pde;
}
}
else if (cell_elements[j].direction == 's') // South cell
{
real_cpu multiplier = (dx * dy) / dz;
if(use_gpu)
{
#ifdef COMPILE_CUDA
active_cells[i]->b -= vms[active_cells[k]->sv_position] * multiplier * kappa_z / dt_pde;
active_cells[i]->b += vms[active_cells[i]->sv_position] * multiplier * kappa_z / dt_pde;
#endif
}
else
{
active_cells[i]->b -= sv[active_cells[k]->sv_position * n_equations_cell_model] * multiplier * kappa_z / dt_pde;
active_cells[i]->b += sv[active_cells[i]->sv_position * n_equations_cell_model] * multiplier * kappa_z / dt_pde;
}
}
else if (cell_elements[j].direction == 'e') // East cell
{
real_cpu multiplier = (dx * dz) / dy;
if(use_gpu)
{
#ifdef COMPILE_CUDA
active_cells[i]->b -= vms[active_cells[k]->sv_position] * multiplier * kappa_y / dt_pde;
active_cells[i]->b += vms[active_cells[i]->sv_position] * multiplier * kappa_y / dt_pde;
#endif
}
else
{
active_cells[i]->b -= sv[active_cells[k]->sv_position * n_equations_cell_model] * multiplier * kappa_y / dt_pde;
active_cells[i]->b += sv[active_cells[i]->sv_position * n_equations_cell_model] * multiplier * kappa_y / dt_pde;
}
}
else if (cell_elements[j].direction == 'w') // West cell
{
real_cpu multiplier = (dx * dz) / dy;
if(use_gpu)
{
#ifdef COMPILE_CUDA
active_cells[i]->b -= vms[active_cells[k]->sv_position] * multiplier * kappa_y / dt_pde;
active_cells[i]->b += vms[active_cells[i]->sv_position] * multiplier * kappa_y / dt_pde;
#endif
}
else
{
active_cells[i]->b -= sv[active_cells[k]->sv_position * n_equations_cell_model] * multiplier * kappa_y / dt_pde;
active_cells[i]->b += sv[active_cells[i]->sv_position * n_equations_cell_model] * multiplier * kappa_y / dt_pde;
}
}
else if (cell_elements[j].direction == 'f') // Forward cell
{
real_cpu multiplier = (dy * dz) / dx;
if(use_gpu)
{
#ifdef COMPILE_CUDA
active_cells[i]->b -= vms[active_cells[k]->sv_position] * multiplier * kappa_x / dt_pde;
active_cells[i]->b += vms[active_cells[i]->sv_position] * multiplier * kappa_x / dt_pde;
#endif
}
else
{
active_cells[i]->b -= sv[active_cells[k]->sv_position * n_equations_cell_model] * multiplier * kappa_x / dt_pde;
active_cells[i]->b += sv[active_cells[i]->sv_position * n_equations_cell_model] * multiplier * kappa_x / dt_pde;
}
}
else if (cell_elements[j].direction == 'b') // Backward cell
{
real_cpu multiplier = (dy * dz) / dx;
if(use_gpu)
{
#ifdef COMPILE_CUDA
active_cells[i]->b -= vms[active_cells[k]->sv_position] * multiplier * kappa_x / dt_pde;
active_cells[i]->b += vms[active_cells[i]->sv_position] * multiplier * kappa_x / dt_pde;
#endif
}
else
{
active_cells[i]->b -= sv[active_cells[k]->sv_position * n_equations_cell_model] * multiplier * kappa_x / dt_pde;
active_cells[i]->b += sv[active_cells[i]->sv_position * n_equations_cell_model] * multiplier * kappa_x / dt_pde;
}
}
}
}
#ifdef COMPILE_CUDA
free(vms);
#endif
}
|
blackscholes.c | // Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif //ENABLE_TBB
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#endif
//Precision to use for calculations
#define fptype float
#define NUM_RUNS 100
typedef struct OptionData_ {
fptype s; // spot price
fptype strike; // strike price
fptype r; // risk-free interest rate
fptype divq; // dividend rate
fptype v; // volatility
fptype t; // time to maturity or option expiration in years
// (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
char OptionType; // Option type. "P"=PUT, "C"=CALL
fptype divs; // dividend vals (not used in this test)
fptype DGrefval; // DerivaGem Reference Value
} OptionData;
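// Each record of the input file supplies these fields in this order:
// s strike r divq v t OptionType divs DGrefval (see the fscanf in main below)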
OptionData *data;
fptype *prices;
int numOptions;
int * otype;
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;
int numError = 0;
int nThreads;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
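// CNDF approximates the standard normal CDF with the Abramowitz & Stegun
// polynomial expansion (formula 26.2.17): N(x) ~= 1 - n(x)*(b1*k + b2*k^2 +
// ... + b5*k^5), with k = 1/(1 + 0.2316419*x), accurate to about 7.5e-8.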
fptype CNDF ( fptype InputX )
{
int sign;
fptype OutputX;
fptype xInput;
fptype xNPrimeofX;
fptype expValues;
fptype xK2;
fptype xK2_2, xK2_3;
fptype xK2_4, xK2_5;
fptype xLocal, xLocal_1;
fptype xLocal_2, xLocal_3;
// Check for negative value of InputX
if (InputX < 0.0) {
InputX = -InputX;
sign = 1;
} else
sign = 0;
xInput = InputX;
// Compute NPrimeX term common to both four & six decimal accuracy calcs
expValues = exp(-0.5f * InputX * InputX);
xNPrimeofX = expValues;
xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;
xK2 = 0.2316419 * xInput;
xK2 = 1.0 + xK2;
xK2 = 1.0 / xK2;
xK2_2 = xK2 * xK2;
xK2_3 = xK2_2 * xK2;
xK2_4 = xK2_3 * xK2;
xK2_5 = xK2_4 * xK2;
xLocal_1 = xK2 * 0.319381530;
xLocal_2 = xK2_2 * (-0.356563782);
xLocal_3 = xK2_3 * 1.781477937;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_4 * (-1.821255978);
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_5 * 1.330274429;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_1 = xLocal_2 + xLocal_1;
xLocal = xLocal_1 * xNPrimeofX;
xLocal = 1.0 - xLocal;
OutputX = xLocal;
if (sign) {
OutputX = 1.0 - OutputX;
}
return OutputX;
}
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
fptype strike, fptype rate, fptype volatility,
fptype time, int otype, float timet )
{
fptype OptionPrice;
// local private working variables for the calculation
fptype xStockPrice;
fptype xStrikePrice;
fptype xRiskFreeRate;
fptype xVolatility;
fptype xTime;
fptype xSqrtTime;
fptype logValues;
fptype xLogTerm;
fptype xD1;
fptype xD2;
fptype xPowerTerm;
fptype xDen;
fptype d1;
fptype d2;
fptype FutureValueX;
fptype NofXd1;
fptype NofXd2;
fptype NegNofXd1;
fptype NegNofXd2;
xStockPrice = sptprice;
xStrikePrice = strike;
xRiskFreeRate = rate;
xVolatility = volatility;
xTime = time;
xSqrtTime = sqrt(xTime);
logValues = log( sptprice / strike );
xLogTerm = logValues;
xPowerTerm = xVolatility * xVolatility;
xPowerTerm = xPowerTerm * 0.5;
xD1 = xRiskFreeRate + xPowerTerm;
xD1 = xD1 * xTime;
xD1 = xD1 + xLogTerm;
xDen = xVolatility * xSqrtTime;
xD1 = xD1 / xDen;
xD2 = xD1 - xDen;
d1 = xD1;
d2 = xD2;
NofXd1 = CNDF( d1 );
NofXd2 = CNDF( d2 );
FutureValueX = strike * ( exp( -(rate)*(time) ) );
if (otype == 0) {
OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
} else {
NegNofXd1 = (1.0 - NofXd1);
NegNofXd2 = (1.0 - NofXd2);
OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
}
return OptionPrice;
}
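/*
 * Sanity check (illustrative, not part of the benchmark): with sptprice = 100,
 * strike = 100, rate = 0.05, volatility = 0.2, time = 1.0 and otype = 0 (call),
 * the closed-form price is approximately 10.4506, which this routine should
 * reproduce to within single-precision rounding.
 */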
#ifdef ENABLE_TBB
struct mainWork {
mainWork() {}
mainWork(mainWork &w, tbb::split) {}
void operator()(const tbb::blocked_range<int> &range) const {
fptype price;
int begin = range.begin();
int end = range.end();
for (int i=begin; i!=end; i++) {
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
rate[i], volatility[i], otime[i],
otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
fptype priceDelta = data[i].DGrefval - price;
if( fabs(priceDelta) >= 1e-5 ){
fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
i, price, data[i].DGrefval, priceDelta);
numError ++;
}
#endif
}
}
};
#endif // ENABLE_TBB
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr) {
int j;
tbb::affinity_partitioner a;
mainWork doall;
for (j=0; j<NUM_RUNS; j++) {
tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
}
return 0;
}
#else // !ENABLE_TBB
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
int bs_thread(void *tid_ptr) {
#endif
int i, j;
fptype price;
fptype priceDelta;
int tid = *(int *)tid_ptr;
int start = tid * (numOptions / nThreads);
int end = start + (numOptions / nThreads);
// give the remainder of numOptions / nThreads to the last thread
if (tid == nThreads - 1) end = numOptions;
for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
rate[i], volatility[i], otime[i],
otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
priceDelta = data[i].DGrefval - price;
if( fabs(priceDelta) >= 1e-4 ){
printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
i, price, data[i].DGrefval, priceDelta);
numError ++;
}
#endif
}
}
return 0;
}
#endif //ENABLE_TBB
int main (int argc, char **argv)
{
FILE *file;
int i;
int loopnum;
fptype * buffer;
int * buffer2;
int rv;
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
if (argc != 4)
{
printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
exit(1);
}
nThreads = atoi(argv[1]);
char *inputFile = argv[2];
char *outputFile = argv[3];
//Read input data from file
file = fopen(inputFile, "r");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", inputFile);
exit(1);
}
rv = fscanf(file, "%i", &numOptions);
if(rv != 1) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
if(nThreads > numOptions) {
printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
nThreads = numOptions;
}
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
if(nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
exit(1);
}
#endif
// alloc spaces for the option data
data = (OptionData*)malloc(numOptions*sizeof(OptionData));
prices = (fptype*)malloc(numOptions*sizeof(fptype));
for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
{
rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval);
if(rv != 9) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", inputFile);
exit(1);
}
#ifdef ENABLE_THREADS
MAIN_INITENV(,8000000,nThreads);
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
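// Over-allocate by PAD bytes and round the base pointer up to a cache-line
// (LINESIZE) boundary so the per-field arrays start on their own cache lines.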
buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
buffer2 = (int *) malloc(numOptions * sizeof(int) + PAD);
otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
for (i=0; i<numOptions; i++) {
otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data[i].s;
strike[i] = data[i].strike;
rate[i] = data[i].r;
volatility[i] = data[i].v;
otime[i] = data[i].t;
}
printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
#ifdef ENABLE_THREADS
#ifdef WIN32
HANDLE *threads;
int *nums;
threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE));
nums = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
free(threads);
free(nums);
#else
int *tids;
tids = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
tids[i]=i;
CREATE_WITH_ARG(bs_thread, &tids[i]);
}
WAIT_FOR_END(nThreads);
free(tids);
#endif //WIN32
#else //ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid=0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else //ENABLE_OPENMP
#ifdef ENABLE_TBB
tbb::task_scheduler_init init(nThreads);
int tid=0;
bs_thread(&tid);
#else //ENABLE_TBB
//serial version
int tid=0;
bs_thread(&tid);
#endif //ENABLE_TBB
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
//Write prices to output file
file = fopen(outputFile, "w");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", outputFile);
exit(1);
}
rv = fprintf(file, "%i\n", numOptions);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
for(i=0; i<numOptions; i++) {
rv = fprintf(file, "%.18f\n", prices[i]);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", outputFile);
exit(1);
}
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
free(data);
free(prices);
free(buffer);
free(buffer2);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
GB_bitmap_expand_to_hyper.c | //------------------------------------------------------------------------------
// GB_bitmap_expand_to_hyper: expand a compact bitmap C to hypersparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#define GB_FREE_ALL \
{ \
GB_phbix_free (C) ; \
GB_FREE (&Cp, Cp_size) ; \
GB_FREE (&Ch, Ch_size) ; \
GB_FREE (&Ci, Ci_size) ; \
}
#include "GB_mxm.h"
GrB_Info GB_bitmap_expand_to_hyper
(
// input/output:
GrB_Matrix C,
// input
int64_t cvlen_final,
int64_t cvdim_final,
GrB_Matrix A,
GrB_Matrix B,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (C != NULL && (GB_IS_BITMAP (C) || GB_IS_FULL (C))) ;
ASSERT (A != NULL && B != NULL) ;
GBURBLE ("(expand bitmap/full to hyper) ") ;
ASSERT_MATRIX_OK (C, "C to expand from bitmap/full to hyper", GB0) ;
ASSERT_MATRIX_OK (A, "A for expand C from bitmap/full to hyper", GB0) ;
ASSERT_MATRIX_OK (B, "B for expand C from bitmap/full to hyper", GB0) ;
int64_t cvlen = C->vlen ;
int64_t cvdim = C->vdim ;
int64_t cnz = cvlen * cvdim ;
bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;
// C is currently a subset of its final dimension, in bitmap or full form.
// It is converted back into sparse/hypersparse form, with zombies if
// bitmap, and expanded in size to be cvlen_final by cvdim_final (A->vdim
// by B->vdim for C=A'*B, or A->vlen by B->vdim for C=A*B).
//----------------------------------------------------------------------
// allocate the sparse/hypersparse structure of the final C
//----------------------------------------------------------------------
int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ;
int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ;
int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ;
Cp = GB_MALLOC (cvdim+1, int64_t, &Cp_size) ;
Ch = NULL ;
if (B_is_hyper)
{
Ch = GB_MALLOC (cvdim, int64_t, &Ch_size) ;
}
Ci = GB_MALLOC (cnz, int64_t, &Ci_size) ;
if (Cp == NULL || (B_is_hyper && Ch == NULL) || Ci == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// construct the hyperlist of C, if B is hypersparse
//----------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ;
if (B_is_hyper)
{
// C becomes hypersparse
ASSERT (cvdim == B->nvec) ;
GB_memcpy (Ch, B->h, cvdim * sizeof (int64_t), nthreads) ;
}
//----------------------------------------------------------------------
// construct the vector pointers of C
//----------------------------------------------------------------------
int64_t pC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cvdim+1 ; pC++)
{
Cp [pC] = pC * cvlen ;
}
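// every vector of C holds exactly cvlen entries (zombies included), so the
// vector pointers are simply the regular sequence 0, cvlen, 2*cvlen, ...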
//----------------------------------------------------------------------
// construct the pattern of C from its bitmap
//----------------------------------------------------------------------
// C(i,j) becomes a zombie if not present in the bitmap
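// Example (a sketch): with cvlen = 3 and bitmap Cb = {1, 0, 1}, the first
// vector of Ci becomes {0, GB_FLIP(1), 2}; GB_FLIP marks entry 1 as a zombie
// to be pruned by a later assembly phase.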
nthreads = GB_nthreads (cnz, chunk, nthreads_max) ;
int8_t *restrict Cb = C->b ;
bool C_is_bitmap = (Cb != NULL) ;
if (C_is_bitmap)
{
// C is bitmap
if (A_is_hyper)
{
// only for C=A'*B
GrB_Index *restrict Ah = (GrB_Index *) A->h ;
ASSERT (cvlen == A->nvec) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
int64_t i = Ah [pC % cvlen] ;
Ci [pC] = (Cb [pC]) ? i : GB_FLIP (i) ;
}
}
else
{
// for C=A'*B or C=A*B
ASSERT (cvlen == cvlen_final) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
int64_t i = pC % cvlen ;
Ci [pC] = (Cb [pC]) ? i : GB_FLIP (i) ;
}
}
}
else
{
// C is full
if (A_is_hyper)
{
// only for C=A'*B
GrB_Index *restrict Ah = (GrB_Index *) A->h ;
ASSERT (cvlen == A->nvec) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
int64_t i = Ah [pC % cvlen] ;
Ci [pC] = i ;
}
}
else
{
// for C=A'*B or C=A*B
ASSERT (cvlen == cvlen_final) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
int64_t i = pC % cvlen ;
Ci [pC] = i ;
}
}
}
//----------------------------------------------------------------------
// transplant the new content and finalize C
//----------------------------------------------------------------------
C->p = Cp ; Cp = NULL ; C->p_size = Cp_size ;
C->h = Ch ; Ch = NULL ; C->h_size = Ch_size ;
C->i = Ci ; Ci = NULL ; C->i_size = Ci_size ;
C->nzombies = (C_is_bitmap) ? (cnz - C->nvals) : 0 ;
C->vdim = cvdim_final ;
C->vlen = cvlen_final ;
C->nvals = -1 ;
C->nvec = cvdim ;
C->plen = cvdim ;
C->nvec_nonempty = (cvlen == 0) ? 0 : cvdim ;
// free the bitmap, if present
GB_FREE ((&C->b), C->b_size) ;
// C is now sparse or hypersparse
ASSERT_MATRIX_OK (C, "C expanded from bitmap/full to hyper", GB0) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
return (GrB_SUCCESS) ;
}
|
homp_dev.c | /*
* homp_dev.c
*
 * contains the device-specific implementations of homp.h functions
*
* Created on: Oct 4, 2014
* Author: yy8
*/
/**
* an easy way for defining dev-specific code:
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
#elif defined (DEVICE_THSIM)
#else
#endif
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <unistd.h>
#include <math.h>
#include "homp.h"
#include "../util/iniparser.h"
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
#include <cublas_v2.h>
inline void devcall_nvgpu_cuda_errchk(int code, char *file, int line, int ab) {
if (code != cudaSuccess) {
fprintf(stderr,"NVGPU_CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (ab) { abort();}
}
}
#define devcall_nvgpu_cuda_assert(ecode) { devcall_nvgpu_cuda_errchk((ecode), __FILE__, __LINE__, 1); }
#endif
#if defined (DEVICE_OPENCL_SUPPORT)
inline void devcall_opencl_errchk(int code, char *file, int line, int ab) {
if (code != CL_SUCCESS) {
fprintf(stderr,"OpenCL assert: %d %s %d\n", code, file, line);
if (ab) { abort();}
}
}
#define devcall_opencl_assert(ecode) { devcall_opencl_errchk((ecode), __FILE__, __LINE__, 1); }
#endif
double addmul(double add, double mul, int ops) {
// need to initialise differently otherwise compiler might optimise away
double sum1 = 0.1, sum2 = -0.1, sum3 = 0.2, sum4 = -0.2, sum5 = 0.0;
double mul1 = 1.0, mul2 = 1.1, mul3 = 1.2, mul4 = 1.3, mul5 = 1.4;
int loops = ops / 10; // we have 10 floating point ops inside the loop
double expected = 5.0 * add * loops + (sum1 + sum2 + sum3 + sum4 + sum5)
+ pow(mul, loops) * (mul1 + mul2 + mul3 + mul4 + mul5);
int i;
for (i = 0; i < loops; i++) {
mul1 *= mul;
mul2 *= mul;
mul3 *= mul;
mul4 *= mul;
mul5 *= mul;
sum1 += add;
sum2 += add;
sum3 += add;
sum4 += add;
sum5 += add;
}
return sum1 + sum2 + sum3 + sum4 + sum5 + mul1 + mul2 + mul3 + mul4 + mul5 - expected;
}
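/* the return value should be ~0; returning and using it keeps the compiler
 * from eliminating the 10-flop loop body that cpu_sustain_gflopss times */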
double cpu_sustain_gflopss(double *flopss) {
double x = M_PI;
double y = 1.0 + 1e-8;
int n = 1000000;
double timer = read_timer();
x = addmul(x, y, n);
timer = read_timer() - timer;
*flopss = n / timer / 1e9;
return x; /* return the result so the whole computation cannot be optimized away */
}
/* OpenMP 4.0 support */
int default_device_var = -1;
void omp_set_default_device(int device_num) {
default_device_var = device_num;
}
int omp_get_default_device(void) {
return default_device_var;
}
int omp_num_devices;
omp_device_t *omp_devices;
int omp_get_num_devices() {
return omp_num_devices;
}
pthread_barrier_t all_dev_sync_barrier;
volatile int omp_device_complete = 0;
volatile int omp_printf_turn = 0;
/* a simple mechanism to let multiple device shepherd threads print in turn so their output does not get scrambled together */
omp_device_type_info_t omp_device_types[OMP_NUM_DEVICE_TYPES] = {
{OMP_DEVICE_HOSTCPU, "OMP_DEVICE_HOSTCPU", "HOSTCPU", 1},
{OMP_DEVICE_NVGPU, "OMP_DEVICE_NVGPU", "NVGPU", 0},
{OMP_DEVICE_ITLGPU, "OMP_DEVICE_ITLGPU", "ITLGPU", 0},
{OMP_DEVICE_ITLMIC, "OMP_DEVICE_ITLMIC", "ITLMIC", 0},
{OMP_DEVICE_TIDSP, "OMP_DEVICE_TIDSP", "TIDSP", 0},
{OMP_DEVICE_AMDAPU, "OMP_DEVICE_AMDAPU", "AMDAPU", 0},
{OMP_DEVICE_THSIM, "OMP_DEVICE_THSIM", "THSIM", 0},
{OMP_DEVICE_REMOTE, "OMP_DEVICE_REMOTE", "REMOTE", 0},
{OMP_DEVICE_LOCALPS, "OMP_DEVICE_LOCALPS", "LOCALPS", 0}
};
/* APIs to support multiple devices: */
char *omp_supported_device_types() { /* return a list of devices supported by the compiler in the format of TYPE1:TYPE2 */
/* FIXME */
return "OMP_DEVICE_HOSTCPU";
}
omp_device_type_t omp_get_device_type(int devid) {
return omp_devices[devid].type;
}
char *omp_get_device_type_as_string(int devid) {
return omp_device_types[omp_devices[devid].type].name;
}
int omp_get_num_itlgpu_dev_ids(omp_device_type_t type) { /* current omp has omp_get_num_devices(); */
return omp_device_types[type].num_devs;
}
int omp_get_num_devices_of_type(omp_device_type_t type) {
return omp_device_types[type].num_devs;
}
/*
* return the first ndev device IDs of the specified type; the function returns the actual number of
* devices stored in the array (devnum_array), which is at most the total number of devices of that type
*
* before calling this function, the caller should allocate devnum_array[ndev]
*/
int omp_get_devices(omp_device_type_t type, int *devnum_array,
int ndev) { /* return a list of devices of the specified type */
int i;
int num = 0;
for (i = 0; i < omp_num_devices; i++)
if (omp_devices[i].type == type) {
devnum_array[num] = omp_devices[i].id;
num++;
if (num == ndev) break;
}
return num;
}
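/*
 * Typical usage (a sketch, assuming at least one HOSTCPU device is configured):
 *
 *   int ids[4];
 *   int n = omp_get_devices(OMP_DEVICE_HOSTCPU, ids, 4);
 *   for (int k = 0; k < n; k++) printf("hostcpu device id: %d\n", ids[k]);
 */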
omp_device_t *omp_get_device(int id) {
return &omp_devices[id];
}
int omp_get_num_active_devices() {
int num_dev;
char *ndev = getenv("OMP_NUM_ACTIVE_DEVICES");
if (ndev != NULL) {
num_dev = atoi(ndev);
if (num_dev == 0 || num_dev > omp_num_devices) num_dev = omp_num_devices;
} else {
num_dev = omp_num_devices;
}
return num_dev;
}
void omp_init_hostcpu_device(omp_device_t *dev, int id, int sysid, int num_cores) {
dev->type = OMP_DEVICE_HOSTCPU;
dev->id = id;
dev->sysid = sysid;
dev->default_stream.dev = dev;
dev->default_stream.systream.myStream = NULL;
dev->mem_type = OMP_DEVICE_MEM_SHARED_CC_NUMA;
dev->dev_properties = &dev->helperth; /* make it point to the thread id */
dev->num_cores = num_cores;
//dev->num_cores = sysconf( _SC_NPROCESSORS_ONLN );
double dummy = cpu_sustain_gflopss(&dev->flopss_percore);
dev->total_real_flopss = dev->num_cores * dev->flopss_percore;
dev->bandwidth = 600 * 1000; /* in MB/s, i.e. 600 GB/s */
dev->latency = 0.02; /* us, i.e. 20 ns */
}
void omp_init_thsim_device(omp_device_t *dev, int id, int sysid, int num_cores) {
dev->type = OMP_DEVICE_THSIM;
dev->id = id;
dev->sysid = sysid;
dev->default_stream.dev = dev;
dev->default_stream.systream.myStream = NULL;
dev->mem_type = OMP_DEVICE_MEM_DISCRETE;
dev->dev_properties = &dev->helperth; /* make it point to the thread id */
dev->num_cores = num_cores;
/*
dev->num_cores = omp_host_dev->num_cores;
dev->flopss_percore = omp_host_dev->flopss_percore;
dev->total_real_flopss = omp_host_dev->total_real_flopss*(1+dev->id);
dev->bandwidth = (2*(1+dev->id))*omp_host_dev->bandwidth / 100;
dev->latency = (1+dev->id)*omp_host_dev->latency * 1000;
*/
}
void omp_init_nvgpu_device(omp_device_t *dev, int id, int sysid) {
dev->type = OMP_DEVICE_NVGPU;
dev->id = id;
dev->sysid = sysid;
dev->default_stream.dev = dev;
dev->default_stream.systream.myStream = NULL;
dev->mem_type = OMP_DEVICE_MEM_DISCRETE;
}
void omp_init_itlmic_device(omp_device_t *dev, int id, int sysid, int num_cores) {
dev->type = OMP_DEVICE_ITLMIC;
dev->id = id;
dev->sysid = sysid;
dev->default_stream.dev = dev;
dev->default_stream.systream.myStream = NULL;
dev->mem_type = OMP_DEVICE_MEM_DISCRETE;
dev->num_cores = num_cores;
}
void omp_init_itlgpu_device(omp_device_t *dev, int id, int sysid, int num_cores) {
dev->type = OMP_DEVICE_ITLGPU;
dev->id = id;
dev->sysid = sysid;
dev->default_stream.dev = dev;
dev->default_stream.systream.myStream = NULL;
dev->mem_type = OMP_DEVICE_MEM_SHARED;
dev->num_cores = num_cores;
#if defined (DEVICE_OPENCL_SUPPORT)
// Default substring for platform name
const char* required_platform_subname = "Intel";
cl_int err = CL_SUCCESS;
// Query for all available OpenCL platforms on the system
cl_uint num_of_platforms = 0;
// get total number of available platforms:
err = clGetPlatformIDs(0, 0, &num_of_platforms);
devcall_opencl_assert(err);
printf("Number of available platforms: %d\n", (int)num_of_platforms);
cl_platform_id platforms[num_of_platforms];
// get IDs for all platforms:
err = clGetPlatformIDs(num_of_platforms, platforms, 0);
devcall_opencl_assert(err);
// List all platforms and select one.
// We use platform name to select needed platform.
cl_uint selected_platform_index = num_of_platforms;
int i;
for(i = 0; i < num_of_platforms; ++i)
{
// Get the length for the i-th platform name
size_t platform_name_length = 0;
err = clGetPlatformInfo(platforms[i], CL_PLATFORM_NAME, 0, 0, &platform_name_length);
devcall_opencl_assert(err);
// Get the name itself for the i-th platform
char platform_name[platform_name_length];
err = clGetPlatformInfo(platforms[i], CL_PLATFORM_NAME, platform_name_length, platform_name, 0);
devcall_opencl_assert(err);
// decide if this i-th platform is the one we are looking for;
// we select the first platform whose name matches and stop searching
if(strstr(platform_name, required_platform_subname) &&
selected_platform_index == num_of_platforms) { // have not selected yet
selected_platform_index = i;
break;
}
}
if(selected_platform_index == num_of_platforms) {
fprintf(stderr, "There is no found platform with name containing %s as a substring.\n", required_platform_subname);
return;
}
cl_platform_id platform = platforms[selected_platform_index];
// check intel GPU
cl_uint num_itlgpu;
err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, 0, &num_itlgpu);
if(CL_DEVICE_NOT_FOUND == err) {
num_itlgpu = 0;
err = CL_SUCCESS;
}
devcall_opencl_assert(err);
if (sysid >= num_itlgpu) {
fprintf(stderr, "The specified sysid %d for Intel GPU is not avaialble. Total Intel GPU: %d\n", num_itlgpu);
return;
} else {
// get a piece of useful capabilities information for each device.
// Retrieve a list of device IDs with type selected by type_index
cl_device_id itlgpu_dev_ids[num_itlgpu];
err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, num_itlgpu, itlgpu_dev_ids, 0);
devcall_opencl_assert(err);
dev->dev_properties = (void*)itlgpu_dev_ids[sysid];
//dev->dev_properties = (cl_device_id*)malloc(sizeof(cl_device_id));
//memcpy(dev->dev_properties, &itlgpu_dev_ids[sysid], sizeof(cl_device_id));
}
#endif
}
void omp_util_copy_device_object(omp_device_t *newone, omp_device_t *src, int newid, int newsysid) {
memcpy(newone, src, sizeof(omp_device_t));
newone->id = newid;
newone->sysid = newsysid;
newone->default_stream.dev = newone;
newone->default_stream.systream.myStream = NULL;
newone->dev_properties = &newone->helperth;
}
int num_hostcpu_dev = 0;
int num_thsim_dev = 0; /* THSIM simulated devices */
/* for NVIDIA GPU devices */
int num_nvgpu_dev = 0;
int num_itlgpu_dev = 0;
int num_itlmic_dev = 0;
/* we allow the same type of device to be specified using a simple form, e.g.
*
*
[cpu]
num = 10;
id = 0
type = cpu
ncores = 8
FLOPss = 2 # GFLOPS/s
Bandwidth = 600000 # MB/s
Latency = 0 #us
*/
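/* a hypothetical spec file with two sections, using only keys the parser below
 * understands (num, sysid, type, ncores, flopss, Bandwidth, Latency, Memory):
 *
 [cpu]
 num = 1
 type = cpu
 ncores = 8
 [nvgpu]
 num = 2
 sysid = 0
 type = nvgpu
 Memory = discrete
 */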
void omp_read_device_spec(char *dev_spec_file) {
dictionary *ini;
ini = iniparser_load(dev_spec_file);
if (ini == NULL) {
fprintf(stderr, "cannot parse file: %s\n", dev_spec_file);
abort();
}
//iniparser_dump(ini, stderr);
int num_sections = iniparser_getnsec(ini);
int i;
/* count total number of devices */
omp_num_devices = 0;
char devname[32];
char keyname[48];
for (i = 0; i < num_sections; i++) {
sprintf(devname, "%s", iniparser_getsecname(ini, i));
sprintf(keyname, "%s:%s", devname, "num");
int num_devs = iniparser_getint(ini, keyname, 1);
if (num_devs > 0) omp_num_devices += num_devs;
}
omp_devices = malloc(sizeof(omp_device_t) * (omp_num_devices));
int devid = 0;
for (i = 0; i < num_sections; i++) {
sprintf(devname, "%s", iniparser_getsecname(ini, i));
sprintf(keyname, "%s:%s", devname, "num");
int num_devs = iniparser_getint(ini, keyname, 1);
if (num_devs <= 0) continue;
omp_device_t *dev = &omp_devices[devid];
sprintf(devname, "%s", iniparser_getsecname(ini, i));
sprintf(keyname, "%s:%s", devname, "sysid");
int devsysid = iniparser_getint(ini, keyname, -1);
sprintf(dev->name, "%s:%d", devname, devsysid);
sprintf(keyname, "%s:%s", devname, "ncores");
int num_cores = iniparser_getint(ini, keyname, 1);
char *devtype;
sprintf(keyname, "%s:%s", devname, "type");
devtype = iniparser_getstring(ini, keyname, "NULL");
if (strcasecmp(devtype, "cpu") == 0 || strcasecmp(devtype, "hostcpu") == 0) {
omp_init_hostcpu_device(dev, devid, devsysid, num_cores);
num_hostcpu_dev += num_devs;
} else if (strcasecmp(devtype, "nvgpu") == 0) {
omp_init_nvgpu_device(dev, devid, devsysid);
num_nvgpu_dev += num_devs;
} else if (strcasecmp(devtype, "thsim") == 0) {
omp_init_thsim_device(dev, devid, devsysid, num_cores);
num_thsim_dev += num_devs;
} else if (strcasecmp(devtype, "itlgpu") == 0) {
omp_init_itlgpu_device(dev, devid, devsysid, num_cores);
num_itlgpu_dev += num_devs;
} else if (strcasecmp(devtype, "itlmic") == 0) {
omp_init_itlmic_device(dev, devid, devsysid, num_cores);
num_itlmic_dev += num_devs;
} else {
printf("unknow device type error: %s \n, default to be hostcpu\n", devtype);
/* unknow device type error */
}
sprintf(keyname, "%s:%s", devname, "flopss");
dev->total_real_flopss = iniparser_getdouble(ini, keyname, -1);
sprintf(keyname, "%s:%s", devname, "Bandwidth");
dev->bandwidth = iniparser_getdouble(ini, keyname, -1);
sprintf(keyname, "%s:%s", devname, "Latency");
dev->latency = iniparser_getdouble(ini, keyname, 0.00000000001);
sprintf(keyname, "%s:%s", devname, "Memory");
char *mem = iniparser_getstring(ini, keyname, "default"); /* or shared */
if (strcasecmp(mem, "shared") == 0) {
dev->mem_type = OMP_DEVICE_MEM_SHARED;
} else if (strcasecmp(mem, "discrete") == 0) {
dev->mem_type = OMP_DEVICE_MEM_DISCRETE;
} else {
/* using default, already done in init_*_device call */
}
devid++;
/* repeating the same type of devices */
int j;
for (j = 1; j < num_devs; j++) {
omp_device_t *newd = &omp_devices[devid];
omp_util_copy_device_object(newd, dev, devid, devsysid + j);
sprintf(newd->name, "%s:%d", devname, devsysid + j);
devid++;
}
}
iniparser_freedict(ini);
}
/**
* So far, we only probe CPU and NVGPU devices.
*
* TODO: to probe OpenCL-capable device and Intel MIC
*/
void omp_probe_devices() {
/* query hardware device */
/* OMP_HOSTCPU_AS_DEVICE=true|true:4|false */
char true_false[6];
char *host_as_dev_str = getenv("OMP_HOSTCPU_AS_DEVICE");
if (host_as_dev_str != NULL) {
sscanf(host_as_dev_str, "%5s", true_false);
if (strncasecmp(host_as_dev_str, "false", 5) == 0) {
// printf("host as device: false\n");
} else if (strncasecmp(host_as_dev_str, "true", 4) == 0) {
if (host_as_dev_str[4] == ':') {
sscanf(host_as_dev_str + 5, "%d", &num_hostcpu_dev);
if (num_hostcpu_dev < 0) num_hostcpu_dev = 1;
}
// printf("host as device: true, #cores: %d\n", num_cores_host_dev);
} else {
printf("Unrecognized OMP_HOSTCPU_AS_DEVICE value(%s), use default: false\n", host_as_dev_str);
}
} else {
// printf("default: false\n");
}
omp_num_devices += num_hostcpu_dev;
char *num_thsim_dev_str = getenv("OMP_NUM_THSIM_DEVICES");
if (num_thsim_dev_str != NULL) {
sscanf(num_thsim_dev_str, "%d", &num_thsim_dev);
if (num_thsim_dev < 0) num_thsim_dev = 0;
} else num_thsim_dev = 0;
omp_num_devices += num_thsim_dev;
/* for NVIDIA GPU devices */
int total_nvgpu = 0;
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result = cudaGetDeviceCount(&total_nvgpu);
devcall_nvgpu_cuda_assert(result);
#endif
int nvgpu_selection[total_nvgpu > 0 ? total_nvgpu : 1]; /* avoid a zero-length VLA when no NVGPU is present */
int i;
for (i = 0; i < total_nvgpu; i++) nvgpu_selection[i] = 0;
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
if (total_nvgpu > 0) {
char * nvgpu_dev_str = getenv("OMP_NVGPU_DEVICES");
if (nvgpu_dev_str != NULL ) {
char * token = strtok(nvgpu_dev_str, ",");
while(token != NULL) {
int gpuid;
sscanf(token, "%d", &gpuid);
nvgpu_selection[gpuid] = 1;
num_nvgpu_dev ++;
token = strtok(NULL, ",");
}
} else {
char * num_nvgpu_dev_str = getenv("OMP_NUM_NVGPU_DEVICES");
if (num_nvgpu_dev_str != NULL ) {
sscanf(num_nvgpu_dev_str, "%d", &num_nvgpu_dev);
if (num_nvgpu_dev > total_nvgpu || num_nvgpu_dev < 0) num_nvgpu_dev = total_nvgpu;
} else num_nvgpu_dev = total_nvgpu;
for (i=0; i<num_nvgpu_dev;i++) nvgpu_selection[i] = 1;
}
omp_num_devices += num_nvgpu_dev;
omp_device_types[OMP_DEVICE_NVGPU].num_devs = num_nvgpu_dev;
}
#endif
omp_devices = malloc(sizeof(omp_device_t) * (omp_num_devices));
int host_dev_sysid = 0;
int thsim_dev_sysid = 0;
int nvgpu_dev_sysid = 0;
for (i = 0; i < omp_num_devices; i++) {
omp_device_t *dev = &omp_devices[i];
if (i < num_hostcpu_dev) {
omp_init_hostcpu_device(dev, i, host_dev_sysid, 1);
sprintf(dev->name, "%s:%d", omp_get_device_typename(dev), host_dev_sysid);
host_dev_sysid++;
} else if (i < num_thsim_dev + num_hostcpu_dev) {
omp_init_thsim_device(dev, i, thsim_dev_sysid, 1);
sprintf(dev->name, "%s:%d", omp_get_device_typename(dev), thsim_dev_sysid);
thsim_dev_sysid++;
} else if (i < num_nvgpu_dev + num_hostcpu_dev + num_thsim_dev) {
for (; nvgpu_dev_sysid < total_nvgpu; nvgpu_dev_sysid++) {
if (nvgpu_selection[nvgpu_dev_sysid]) {
break;
}
}
omp_init_nvgpu_device(dev, i, nvgpu_dev_sysid);
sprintf(dev->name, "%s:%d", omp_get_device_typename(dev), nvgpu_dev_sysid);
nvgpu_dev_sysid++;
} else {
/* TODO: unknown device type error */
}
}
}
/* init the device objects, num_of_devices, helper threads, default_device_var ICV etc
*
*/
int omp_init_devices() {
/* query hardware device */
omp_num_devices = 0; /* we always have at least host device */
int i;
char *dev_spec_file = getenv("OMP_DEV_SPEC_FILE");
if (dev_spec_file != NULL) {
omp_read_device_spec(dev_spec_file);
} else {
#ifdef DEVICE_AUTO_PROBE
printf("The device specifications can be provided through system probing:\n");
printf("\tTo help system probing and customize configuration, using the following environment variable\n");
printf("\t\tOMP_HOSTCPU_AS_DEVICE for enabling hostcpu as devices, e.g. true|TRUE:4|false, default false.\n");
printf("\t\t\tTRUE:4, means 4 hostcpu to be used as devices\n");
printf("\t\tOMP_NUM_THSIM_DEVICES for selecting a number of THSIM devices (default 0)\n");
printf("\t\tOMP_NUM_NVGPU_DEVICES for selecting a number of NVIDIA GPU devices from dev 0 (default, total available).\n");
printf("\t\t\tThis variable is overwritten by OMP_NVGPU_DEVICES).\n");
printf("\t\tOMP_NVGPU_DEVICES for selecting specific NVGPU devices (e.g., \"0,2,3\",no spaces)\n");
printf("=====================================================================================================================\n");
omp_probe_devices();
#else
printf("The OMP_DEV_SPEC_FILE variable is needed to set for a device specification file!\n");
exit(1);
#endif
}
if (omp_num_devices) {
default_device_var = 0;
} else {
default_device_var = -1;
}
omp_device_types[OMP_DEVICE_HOSTCPU].num_devs = num_hostcpu_dev;
omp_device_types[OMP_DEVICE_THSIM].num_devs = num_thsim_dev;
omp_device_types[OMP_DEVICE_NVGPU].num_devs = num_nvgpu_dev;
omp_device_types[OMP_DEVICE_ITLGPU].num_devs = num_itlgpu_dev;
omp_device_types[OMP_DEVICE_ITLMIC].num_devs = num_itlmic_dev;
/* create the helper thread for each device */
/* the helper thread setup */
pthread_attr_t attr;
pthread_attr_init(&attr);
/* initialize attr with default attributes */
pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
pthread_setconcurrency(omp_num_devices + 1);
pthread_barrier_init(&all_dev_sync_barrier, NULL, omp_num_devices + 1);
for (i = 0; i < omp_num_devices; i++) {
omp_device_t *dev = &omp_devices[i];
dev->status = 1;
dev->resident_data_maps = NULL;
dev->offload_request = NULL;
dev->offload_stack_top = -1;
int rt = pthread_create(&dev->helperth, &attr, (void *(*)(void *)) helper_thread_main, (void *) dev);
if (rt) {
fprintf(stderr, "cannot create helper threads for devices.\n");
exit(1);
}
}
printf("============================================================================================================================\n");
printf("Total %d devices: %d HOSTCPU, %d NVGPU, %d ITLMIC, %d ITLGPU and %d THSIM; default dev: %d.\n",
omp_num_devices,
num_hostcpu_dev, num_nvgpu_dev, num_itlmic_dev, num_itlgpu_dev, num_thsim_dev, default_device_var);
for (i = 0; i < omp_num_devices; i++) {
omp_device_t *dev = &omp_devices[i];
char *mem_type = "SHARED";
if (dev->mem_type == OMP_DEVICE_MEM_DISCRETE) {
mem_type = "DISCRETE";
}
printf(" %d|sysid: %d, type: %s, name: %s, ncores: %d, mem: %s, flops: %0.2fGFLOPS/s, bandwidth: %.2fMB/s, latency: %.2fus\n",
dev->id, dev->sysid, omp_get_device_typename(dev), dev->name, dev->num_cores, mem_type,
dev->total_real_flopss, dev->bandwidth,
dev->latency);
//printf("\t\tstream dev: %s\n", dev->default_stream.dev->name);
if (dev->type == OMP_DEVICE_NVGPU) {
if (dev->mem_type == OMP_DEVICE_MEM_DISCRETE) {
#if defined(DEVICE_NVGPU_CUDA_VSHAREDM)
printf("\t\tUnified Memory is supported in the runtime, but this device is not set to use it. To use it, enable shared mem in the dev spec(Memory=shared)\n");
#endif
}
if (dev->mem_type == OMP_DEVICE_MEM_SHARED) {
#if defined(DEVICE_NVGPU_CUDA_VSHAREDM)
#else
printf("\t\tUnified Memory is NOT supported in the runtime, fall back to discrete memory for this device. To enable shared mem support in runtime, set the DEVICE_NVGPU_CUDA_VSHAREDM macro.\n");
dev->mem_type = OMP_DEVICE_MEM_DISCRETE;
#endif
}
}
}
pthread_barrier_wait(&all_dev_sync_barrier);
LOOP_DIST_POLICY = omp_read_dist_policy_options(&LOOP_DIST_CHUNK_SIZE, &LOOP_DIST_CUTOFF_RATIO);
omp_print_homp_usage();
return omp_num_devices;
}
void omp_warmup_device(omp_device_t *dev) {
omp_device_type_t devtype = dev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cublasCreate(&dev->cublas_handle);
dev->dev_properties = (struct cudaDeviceProp*)malloc(sizeof(struct cudaDeviceProp));
cudaSetDevice(dev->sysid);
cudaGetDeviceProperties(dev->dev_properties, dev->sysid);
/* warm up the device */
void * dummy_dev;
char dummy_host[1024];
cudaMalloc(&dummy_dev, 1024);
cudaMemcpy(dummy_dev, dummy_host, 1024, cudaMemcpyHostToDevice);
cudaMemcpy(dummy_host, dummy_dev, 1024, cudaMemcpyDeviceToHost);
cudaFree(dummy_dev);
#endif
} else if (devtype == OMP_DEVICE_ITLGPU) {
/* we already init property when read in from the file */
#if defined (DEVICE_OPENCL_SUPPORT)
cl_int err;
// Create a context
cl_context context;
context = clCreateContext(0, 1, (cl_device_id*)&dev->dev_properties, NULL, NULL, &err);
dev->default_context = (void*)context;
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
#if defined (DEVICE_ITLMIC_SUPPORT)
int SIZE = 1024;
char dummy[SIZE];
int i;
#pragma offload target(mic:dev->sysid) in (dummy:length(SIZE)) out(dummy:length(SIZE))
{
#pragma omp parallel for simd
for (i=0; i<SIZE; i++){
dummy[i] = dummy[i] * 7;
}
}
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
/* warm up the OpenMP environment */
/*
int i;
int dummy_size = dev->num_cores * 100;
float dummy_array[dummy_size];
#pragma omp parallel for shared(dummy_size, dummy_array) private (i)
for (i = 0; i < dummy_size; i++) {
dummy_array[i] *= i * dummy_array[(i + dev->num_cores) % dummy_size];
}
*/
} else {
/* abort(); unknown device type */
}
}
// terminate helper threads
void omp_fini_devices() {
int i;
omp_device_complete = 1;
for (i = 0; i < omp_num_devices; i++) {
omp_device_t *dev = &omp_devices[i];
int rt = pthread_join(dev->helperth, NULL);
omp_device_type_t devtype = dev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
free(dev->dev_properties);
cublasDestroy(dev->cublas_handle);
#endif
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
clReleaseCommandQueue(dev->default_stream.systream.clqueue);
clReleaseContext((cl_context)dev->default_context);
#endif
}
}
pthread_barrier_destroy(&all_dev_sync_barrier);
free(omp_devices);
}
int omp_set_current_device_dev(omp_device_t *d) {
int result;
if (d->type == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
result = cudaSetDevice(d->sysid);
devcall_nvgpu_cuda_assert (result);
#endif
}
return d->id;
}
void omp_map_mapto(omp_data_map_t *map) {
if (map->map_type == OMP_DATA_MAP_COPY)
omp_map_memcpy_to((void *) map->map_dev_wextra_ptr, map->dev, (void *) map->map_source_wextra_ptr,
map->map_wextra_size);
}
void omp_map_mapto_async(omp_data_map_t *map, omp_dev_stream_t *stream) {
if (map->map_type == OMP_DATA_MAP_COPY) {
omp_map_memcpy_to_async((void *) map->map_dev_wextra_ptr, map->dev, (void *) map->map_source_wextra_ptr,
map->map_wextra_size, stream);
// printf("%s, dev: %d, mapto: %X <--- %X\n", map->info->symbol, map->dev->id, map->map_dev_ptr, map->map_source_ptr);
// printf("%s, dev: %d, mapto: %X <--- %X of extra\n", map->info->symbol, map->dev->id, map->map_dev_wextra_ptr, map->map_source_wextra_ptr);
}
}
void omp_map_mapfrom(omp_data_map_t *map) {
if (map->map_type == OMP_DATA_MAP_COPY)
omp_map_memcpy_from((void *) map->map_source_wextra_ptr, (void *) map->map_dev_wextra_ptr, map->dev,
map->map_wextra_size); /* memcpy from host to device */
}
void omp_map_mapfrom_async(omp_data_map_t *map, omp_dev_stream_t *stream) {
if (map->map_type == OMP_DATA_MAP_COPY) {
// omp_map_memcpy_from_async((void*)map->map_source_ptr, (void*)map->map_dev_ptr, map->dev, map->map_size, stream); /* memcpy from host to device */
omp_map_memcpy_from_async((void *) map->map_source_wextra_ptr, (void *) map->map_dev_wextra_ptr, map->dev,
map->map_wextra_size, stream); /* memcpy from host to device */
// printf("%s, dev: %d, mapfrom: %X <--- %X\n", map->info->symbol, map->dev->id, map->map_source_ptr, map->map_dev_ptr);
// printf("%s, dev: %d, mapfrom: %X <--- %X of extra\n", map->info->symbol, map->dev->id, map->map_source_wextra_ptr, map->map_dev_wextra_ptr);
}
}
void *omp_unified_malloc(long size) {
void *ptr = NULL;
#if defined (DEVICE_NVGPU_CUDA_SUPPORT) && defined (DEVICE_NVGPU_CUDA_VSHAREDM)
#if defined (DEVICE_NVGPU_CUDA_UNIFIEDMEM)
/* this is only for kepler and > 4.0 cuda rt */
cudaMallocManaged(&ptr, size, 0);
#else
/* cuda zero-copy */
cudaError_t result;
result = cudaHostAlloc(&ptr, size, cudaHostAllocPortable | cudaHostAllocMapped);
devcall_nvgpu_cuda_assert(result);
#endif
#else
ptr = malloc(size);
#endif
return ptr;
}
void omp_unified_free(void *ptr) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT) && defined (DEVICE_NVGPU_CUDA_VSHAREDM)
#if defined (DEVICE_NVGPU_CUDA_UNIFIEDMEM)
/* match cudaMallocManaged */
cudaFree(ptr);
#else
/* cuda zero-copy */
cudaFreeHost(ptr);
#endif
#else
free(ptr);
#endif
return;
}
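/*
 * Usage sketch: omp_unified_malloc/omp_unified_free must be used as a pair,
 * because the allocation path (managed memory, zero-copy, or plain malloc) is
 * chosen at compile time and each path requires its matching free call:
 *
 *   double *a = (double *) omp_unified_malloc(n * sizeof(double));
 *   ... use a on the host and/or the device ...
 *   omp_unified_free(a);
 */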
//void *omp_map_malloc_dev(omp_device_t *dev, long size, void * context, int flags, void *host_ptr) {
void *omp_map_malloc_dev(omp_device_t *dev, void *src, long size) {
omp_device_type_t devtype = dev->type;
void *ptr = NULL;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
if (cudaErrorMemoryAllocation == cudaMalloc(&ptr, size)) {
fprintf(stderr, "cudaMalloc error to allocate mem on device\n");
}
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
ptr = malloc(size);
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
ptr = (void*)clCreateBuffer((cl_context)dev->default_context, CL_MEM_READ_WRITE, size, NULL, NULL);
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
/*
* For Xeon Phi with the Intel compiler, the runtime uses the host pointer to track the memory
* address on the device; thus we simply return the original pointer
*/
#if defined (DEVICE_ITLMIC_SUPPORT)
#ifndef ITLMIC_COMBINED_OFFLOADING
char * srcchar = (char*)src;
//printf("malloc: %X for %d\n", srcchar, size);
#pragma offload_transfer target(mic:dev->sysid) nocopy (srcchar:length(size) alloc_if(1) free_if(0) align(64))
#endif
ptr = src;
#endif
} else {
fprintf(stderr, "device type is not supported for this call\n");
abort();
}
//printf("dev memory allocated on %d, %X\n", dev->id, ptr);
return ptr;
}
void omp_map_free_dev(omp_device_t *dev, void *ptr, int size) {
omp_device_type_t devtype = dev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result = cudaFree(ptr);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
free(ptr);
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
clReleaseMemObject((cl_mem)ptr);
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
#if defined (DEVICE_ITLMIC_SUPPORT)
#ifndef ITLMIC_COMBINED_OFFLOADING
char * charptr = (char*)ptr;
// printf("free: %X\n", ptr);
#pragma offload_transfer target(mic:dev->sysid) nocopy (charptr:length(size) alloc_if(0) free_if(1))
#endif
#endif
} else {
fprintf(stderr, "device type is not supported for this call\n");
abort();
}
}
void omp_map_memcpy_to(void *dst, omp_device_t *dstdev, const void *src, long size) {
omp_device_type_t devtype = dstdev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaMemcpy((void *)dst,(const void *)src,size, cudaMemcpyHostToDevice);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
memcpy((void *) dst, (const void *) src, size);
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
cl_int err = clEnqueueWriteBuffer(dstdev->default_stream.systream.clqueue, (cl_mem)dst, CL_TRUE, 0, size, src, 0, NULL, NULL);
devcall_opencl_assert(err);
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
#if defined (DEVICE_ITLMIC_SUPPORT)
#ifndef ITLMIC_COMBINED_OFFLOADING
char * charsrc = (char*)src;
#pragma offload_transfer target(mic:dstdev->sysid) in (charsrc:length(size) alloc_if(0) free_if(0))
#endif
#endif
} else {
fprintf(stderr, "device type is not supported for this call\n");
abort();
}
}
void omp_map_memcpy_to_async(void *dst, omp_device_t *dstdev, const void *src, long size, omp_dev_stream_t *stream) {
// printf("memcpytoasync: dev: %d, %X->%X\n", dstdev->id, src, dst);
omp_device_type_t devtype = dstdev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaMemcpyAsync((void *)dst,(const void *)src,size, cudaMemcpyHostToDevice, stream->systream.cudaStream);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
// fprintf(stderr, "no async call support, use sync memcpy call\n");
memcpy((void *) dst, (const void *) src, size);
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
cl_int err = clEnqueueWriteBuffer(stream->systream.clqueue, (cl_mem)dst, CL_FALSE, 0, size, src, 0, NULL, NULL);
devcall_opencl_assert(err);
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
#if defined (DEVICE_ITLMIC_SUPPORT)
#ifndef ITLMIC_COMBINED_OFFLOADING
char * charsrc = (char*)src;
//printf("copyto_async: %X for %d bytes\n", charsrc, size);
#pragma offload_transfer target(mic:dstdev->sysid) in (charsrc:length(size) alloc_if(0) free_if(0))
#endif
#endif
} else {
fprintf(stderr, "device type is not supported for this call\n");
abort();
}
}
void omp_map_memcpy_from(void *dst, const void *src, omp_device_t *srcdev, long size) {
omp_device_type_t devtype = srcdev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaMemcpy((void *)dst,(const void *)src,size, cudaMemcpyDeviceToHost);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
memcpy((void *) dst, (const void *) src, size);
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
clEnqueueReadBuffer(srcdev->default_stream.systream.clqueue, (cl_mem)src, CL_TRUE, 0, size, dst, 0, NULL, NULL);
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
#if defined (DEVICE_ITLMIC_SUPPORT)
#ifndef ITLMIC_COMBINED_OFFLOADING
char * chardst = (char*)dst;
#pragma offload_transfer target(mic:srcdev->sysid) out (chardst:length(size) alloc_if(0) free_if(0))
#endif
#endif
} else {
fprintf(stderr, "device type is not supported for this call\n");
abort();
}
}
/**
* device to host, async */
void omp_map_memcpy_from_async(void *dst, const void *src, omp_device_t *srcdev, long size, omp_dev_stream_t *stream) {
omp_device_type_t devtype = srcdev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaMemcpyAsync((void *)dst,(const void *)src,size, cudaMemcpyDeviceToHost, stream->systream.cudaStream);
// printf("memcpyfrom_async: dev: %d, %X->%X\n", srcdev->id, src, dst);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
// fprintf(stderr, "no async call support, use sync memcpy call\n");
memcpy((void *) dst, (const void *) src, size);
// printf("memcpy from: dest: %X, src: %X, size: %d\n", map->map_buffer, map->map_dev_ptr);
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
clEnqueueReadBuffer(stream->systream.clqueue, (cl_mem)src, CL_FALSE, 0, size, dst, 0, NULL, NULL);
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
#if defined (DEVICE_ITLMIC_SUPPORT)
#ifndef ITLMIC_COMBINED_OFFLOADING
char * chardst = (char*)dst;
//printf("Copyfrom_async: %X\n", chardst);
#pragma offload_transfer target(mic:srcdev->sysid) out (chardst:length(size) alloc_if(0) free_if(0))
#endif
#endif
} else {
fprintf(stderr, "device type is not supported for this call\n");
abort();
}
}
/**
 * this should be called from the src side for the NVGPU implementation
*
* return 1: peer2peer is available and enabled
* 0: no peer2peer
*
* TODO: OpenCL support
*/
int omp_map_enable_memcpy_DeviceToDevice(omp_device_t *dstdev, omp_device_t *srcdev) {
omp_device_type_t dst_devtype = dstdev->type;
omp_device_type_t src_devtype = srcdev->type;
if (dst_devtype == OMP_DEVICE_NVGPU && src_devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
int can_access = 0;
cudaError_t result;
result = cudaDeviceCanAccessPeer(&can_access, srcdev->sysid, dstdev->sysid);
devcall_nvgpu_cuda_assert(result);
if (can_access) {
result = cudaDeviceEnablePeerAccess(dstdev->sysid, 0);
/* a fresh enable or an already-enabled link both mean peer2peer is usable */
if (result == cudaSuccess || result == cudaErrorPeerAccessAlreadyEnabled) return 1;
}
return 0;
#endif
} else if (dst_devtype == OMP_DEVICE_THSIM || dst_devtype == OMP_DEVICE_HOSTCPU ||
src_devtype == OMP_DEVICE_THSIM || src_devtype == OMP_DEVICE_HOSTCPU) {
return 0;
} else if ((src_devtype == OMP_DEVICE_NVGPU && dst_devtype == OMP_DEVICE_ITLMIC) ||
(dst_devtype == OMP_DEVICE_NVGPU && src_devtype == OMP_DEVICE_ITLMIC) ) {
return 0;
} else if ((src_devtype == OMP_DEVICE_ITLMIC && dst_devtype == OMP_DEVICE_ITLMIC)) {
return 0;
} else if ((src_devtype == OMP_DEVICE_ITLMIC && dst_devtype == OMP_DEVICE_HOSTCPU)) {
return 1;
} else if ((src_devtype == OMP_DEVICE_HOSTCPU && dst_devtype == OMP_DEVICE_ITLMIC)) {
return 1;
}
return 0;
}
/**
* TODO: OpenCL support
*/
void omp_map_memcpy_DeviceToDevice(void *dst, omp_device_t *dstdev, void *src, omp_device_t *srcdev, int size) {
omp_device_type_t dst_devtype = dstdev->type;
omp_device_type_t src_devtype = srcdev->type;
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
if (dst_devtype == OMP_DEVICE_NVGPU && src_devtype == OMP_DEVICE_NVGPU) {
cudaError_t result;
result = cudaMemcpy((void *)dst,(const void *)src,size, cudaMemcpyDeviceToDevice);
// result = cudaMemcpyPeer(dst, dstdev->sysid, src, srcdev->sysid, size);
devcall_nvgpu_cuda_assert(result);
return;
} else if ((dst_devtype == OMP_DEVICE_THSIM || dst_devtype == OMP_DEVICE_HOSTCPU) && src_devtype == OMP_DEVICE_NVGPU) {
cudaError_t result;
result = cudaMemcpy((void *)dst,(const void *)src,size, cudaMemcpyDeviceToHost);
devcall_nvgpu_cuda_assert(result);
return;
} else if(dst_devtype == OMP_DEVICE_NVGPU && (src_devtype == OMP_DEVICE_THSIM || src_devtype == OMP_DEVICE_HOSTCPU)) {
cudaError_t result;
result = cudaMemcpy((void *)dst,(const void *)src,size, cudaMemcpyHostToDevice);
devcall_nvgpu_cuda_assert(result);
return;
} else if (dst_devtype == OMP_DEVICE_NVGPU && src_devtype == OMP_DEVICE_ITLGPU) {
} else if (src_devtype == OMP_DEVICE_NVGPU && dst_devtype == OMP_DEVICE_ITLGPU) {
}
#endif
if ((dst_devtype == OMP_DEVICE_THSIM || dst_devtype == OMP_DEVICE_HOSTCPU) &&
(src_devtype == OMP_DEVICE_THSIM || src_devtype ==
OMP_DEVICE_HOSTCPU)) {
memcpy((void *) dst, (const void *) src, size);
} else if ((src_devtype == OMP_DEVICE_ITLMIC && dst_devtype == OMP_DEVICE_HOSTCPU)) {
#if defined (DEVICE_ITLMIC_SUPPORT)
char * charsrc = (char*)src;
//printf("copyto_async: %X for %d bytes\n", charsrc, size);
#pragma offload_transfer target(mic:srcdev->sysid) out (charsrc:length(size) alloc_if(0) free_if(0))
#endif
} else if ((src_devtype == OMP_DEVICE_HOSTCPU && dst_devtype == OMP_DEVICE_ITLMIC)) {
#if defined (DEVICE_ITLMIC_SUPPORT)
char * charsrc = (char*)src;
//printf("copyto_async: %X for %d bytes\n", charsrc, size);
#pragma offload_transfer target(mic:dstdev->sysid) in (charsrc:length(size) alloc_if(0) free_if(0))
#endif
} else {
fprintf(stderr, "device type is not supported for this call: %s:%d\n", __FILE__, __LINE__);
abort();
}
}
/** it is a push operation, i.e. src pushes data to dst */
/**
* TODO: OpenCL support
*/
void omp_map_memcpy_DeviceToDeviceAsync(void *dst, omp_device_t *dstdev, void *src, omp_device_t *srcdev, int size,
omp_dev_stream_t *srcstream) {
omp_device_type_t dst_devtype = dstdev->type;
omp_device_type_t src_devtype = srcdev->type;
if (dst_devtype == OMP_DEVICE_NVGPU && src_devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaMemcpyAsync((void *)dst,(const void *)src,size, cudaMemcpyDeviceToDevice,srcstream->systream.cudaStream);
//result = cudaMemcpyPeerAsync(dst, dstdev->sysid, src, srcdev->sysid, size, srcstream->systream.cudaStream);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (dst_devtype == OMP_DEVICE_THSIM && src_devtype == OMP_DEVICE_THSIM) {
memcpy((void *) dst, (const void *) src, size);
} else {
fprintf(stderr, "device type is not supported for this call: %s:%d\n", __FILE__, __LINE__);
abort();
}
}
/* In the current implementation of the runtime, we will NOT use stream callbacks for timing or for operations such as reductions.
* The reason is that CUDA uses a driver thread to handle callbacks, which is unnecessary since we have a dedicated helper thread
* for each GPU and that helper thread can do this kind of work
*/
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
#if 0
void xomp_beyond_block_reduction_float_stream_callback(cudaStream_t stream, cudaError_t status, void* userData ) {
omp_reduction_float_t * rdata = (omp_reduction_float_t*)userData;
float result = 0.0;
int i;
for (i=0; i<rdata->num; i++)
result += rdata->input[i];
rdata->result = result;
}
#endif
void omp_stream_host_timer_callback(cudaStream_t stream, cudaError_t status, void* userData ) {
double * time = (double*)userData;
*time = read_timer_ms();
}
#endif
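/* Per the CUDA documentation, a callback enqueued with cudaStreamAddCallback runs only
 * after all work submitted to the stream before it has completed, so the host timestamp
 * taken in omp_stream_host_timer_callback marks the completion of the preceding stream work.
 */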
void omp_stream_create(omp_device_t *d, omp_dev_stream_t *stream) {
stream->dev = d;
if (d->type == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
//stream->systream.cudaStream = 0;
result = cudaStreamCreateWithFlags(&stream->systream.cudaStream, cudaStreamNonBlocking);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (d->type == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
// Create a command queue
cl_command_queue queue; // command queue
cl_int err;
queue = clCreateCommandQueue(context, (cl_device_id)d->dev_properties, 0, &err); /* 'context' is assumed to be the global OpenCL context */
stream->systream.clqueue = queue;
#endif
} else if (d->type == OMP_DEVICE_ITLMIC) {
/* do nothing since we use offload pragma for data movement */
} else if (d->type == OMP_DEVICE_THSIM || d->type == OMP_DEVICE_HOSTCPU) {
/* do nothing */
} else {
}
}
/**
 * sync the device by syncing the stream so all the pending calls in the stream are completed;
 * use omp_stream_destroy below to destroy the stream afterwards if needed.
 */
void omp_stream_sync(omp_dev_stream_t *st) {
omp_device_type_t devtype = st->dev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaStreamSynchronize(st->systream.cudaStream);
devcall_nvgpu_cuda_assert(result);
#else
#endif
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
clFinish(st->systream.clqueue);
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
} else {
}
}
void omp_stream_destroy(omp_dev_stream_t *st) {
omp_device_type_t devtype = st->dev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaStreamDestroy(st->systream.cudaStream);
devcall_nvgpu_cuda_assert(result);
#else
#endif
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
clReleaseCommandQueue(st->systream.clqueue);
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
} else {
}
}
/* the event msg has a limited length defined by the OMP_EVENT_MSG_LENGTH macro; additional characters will be cut off */
void omp_event_init(omp_event_t *ev, omp_device_t *dev, omp_event_record_method_t record_method,
omp_dev_stream_t *stream, const char *event_name, const char *event_msg, ...) {
if (stream != NULL && stream->dev != dev) {
fprintf(stderr, "stream and event are not compatible, they are from two different devices: %s, %s\n",
stream->dev->name, dev->name);
abort();
}
ev->dev = dev;
ev->record_method = record_method;
ev->count = 0;
ev->recorded = 0;
ev->elapsed_dev = ev->elapsed_host = 0.0;
ev->stream = stream;
ev->event_name = event_name;
va_list l;
va_start(l, event_msg);
vsnprintf(ev->event_description, OMP_EVENT_MSG_LENGTH, event_msg, l);
va_end(l);
omp_device_type_t devtype = dev->type;
if (record_method == OMP_EVENT_DEV_RECORD || record_method == OMP_EVENT_HOST_DEV_RECORD) {
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaEventCreateWithFlags(&ev->start_event_dev, cudaEventBlockingSync);
devcall_nvgpu_cuda_assert(result);
result = cudaEventCreateWithFlags(&ev->stop_event_dev, cudaEventBlockingSync);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
/* do nothing */
} else if (devtype == OMP_DEVICE_ITLGPU) {
#if defined (DEVICE_OPENCL_SUPPORT)
#endif
} else if (devtype == OMP_DEVICE_ITLMIC) {
} else {
fprintf(stderr, "other type of devices are not yet supported to init this event\n");
abort();
}
}
//omp_event_print(ev);
}
/* the event msg has a limited length defined by the OMP_EVENT_MSG_LENGTH macro; additional characters will be cut off */
void omp_event_set_attribute(omp_event_t *ev, omp_dev_stream_t *stream, const char *event_name, const char *event_msg, ...) {
if (stream != NULL && stream->dev != ev->dev) {
fprintf(stderr, "stream and event are from two different devices: %p, %p\n", stream->dev, ev->dev);
fprintf(stderr, "stream and event are not compatible, they are from two different devices: %s, %s\n",
stream->dev->name, ev->dev->name);
abort();
}
ev->stream = stream;
ev->event_name = event_name;
va_list l;
va_start(l, event_msg);
vsnprintf(ev->event_description, OMP_EVENT_MSG_LENGTH, event_msg, l);
va_end(l);
//omp_event_print(ev);
}
void omp_event_print(omp_event_t *ev) {
printf("ev: %X, dev: %X, stream: %X, record method: %d, name: %s, description: %s\n", ev, ev->dev,
ev->stream, ev->record_method, ev->event_name, ev->event_description);
}
void omp_event_record_start(omp_event_t *ev) {
//printf("omp_event_record_start: ev %X name: %s, dev: %X\n", ev, ev->event_name, ev->dev);
#if 0
/* turn this off since our code does not have this bug ;-) */
if (ev->event_name == NULL) {
fprintf(stderr, "An event without a name (%X) for dev (%d) cannot record\n", ev, ev->dev->id);
abort();
}
#endif
omp_device_type_t devtype = ev->dev->type;
omp_event_record_method_t rm = ev->record_method;
if (rm == OMP_EVENT_DEV_RECORD || rm == OMP_EVENT_HOST_DEV_RECORD) {
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaStreamAddCallback(ev->stream->systream.cudaStream, omp_stream_host_timer_callback, &ev->start_time_dev, 0);
devcall_nvgpu_cuda_assert(result);
result = cudaEventRecord(ev->start_event_dev, ev->stream->systream.cudaStream);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
ev->start_time_dev = read_timer_ms();
} else if (devtype == OMP_DEVICE_ITLMIC) {
ev->start_time_dev = read_timer_ms();
} else {
fprintf(stderr, "other type of devices are not yet supported to start event recording\n");
}
}
if (rm == OMP_EVENT_HOST_RECORD || rm == OMP_EVENT_HOST_DEV_RECORD) {
ev->start_time_host = read_timer_ms();
}
}
void omp_event_record_stop(omp_event_t *ev) {
omp_dev_stream_t *stream = ev->stream;
omp_event_record_method_t record_method = ev->record_method;
omp_device_type_t devtype = ev->dev->type;
if (record_method == OMP_EVENT_DEV_RECORD || record_method == OMP_EVENT_HOST_DEV_RECORD) {
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
cudaError_t result;
result = cudaStreamAddCallback(stream->systream.cudaStream, omp_stream_host_timer_callback, &ev->stop_time_dev, 0);
devcall_nvgpu_cuda_assert(result);
result = cudaEventRecord(ev->stop_event_dev, stream->systream.cudaStream);
devcall_nvgpu_cuda_assert(result);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
ev->stop_time_dev = read_timer_ms();
} else if (devtype == OMP_DEVICE_ITLMIC) {
ev->stop_time_dev = read_timer_ms();
} else {
fprintf(stderr, "other type of devices are not yet supported to stop event record\n");
}
}
if (record_method == OMP_EVENT_HOST_RECORD || record_method == OMP_EVENT_HOST_DEV_RECORD) {
ev->stop_time_host = read_timer_ms();
}
ev->recorded = 1;
}
static double omp_event_elapsed_ms_dev(omp_event_t *ev) {
omp_device_type_t devtype = ev->dev->type;
float elapsed = -1.0;
double elapsed1 = -1.0;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
elapsed1 = ev->stop_time_dev - ev->start_time_dev;
cudaError_t result;
result = cudaEventSynchronize(ev->start_event_dev);
devcall_nvgpu_cuda_assert(result);
result = cudaEventSynchronize(ev->stop_event_dev);
devcall_nvgpu_cuda_assert(result);
result = cudaEventElapsedTime(&elapsed, ev->start_event_dev, ev->stop_event_dev);
devcall_nvgpu_cuda_assert(result);
//printf("timing difference, callback: %f, event: %f\n", elapsed1, elapsed);
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
elapsed = ev->stop_time_dev - ev->start_time_dev;
} else if (devtype == OMP_DEVICE_ITLMIC) {
elapsed = ev->stop_time_dev - ev->start_time_dev;
} else {
fprintf(stderr, "other type of devices are not yet supported to calculate elapsed\n");
}
//printf("dev event: start: %f, stop: %f, elapsed: %f (%f)\n", ev->start_time_dev, ev->stop_time_dev, elapsed, elapsed1);
return elapsed;
}
static double omp_event_elapsed_ms_host(omp_event_t *ev) {
double elapsed = ev->stop_time_host - ev->start_time_host;
//printf("host event: start: %f, stop: %f, elapsed: %f\n", ev->start_time_host, ev->stop_time_host, elapsed);
return elapsed;
}
/**
 * Computes the elapsed time between the recorded start and stop of an event (in milliseconds, with a resolution of around 0.5 microseconds for CUDA events).
 */
double omp_event_elapsed_ms(omp_event_t *ev) {
double rt = 0.0;
if (!ev->recorded) return rt;
omp_event_record_method_t record_method = ev->record_method;
omp_device_type_t devtype = ev->dev->type;
if (record_method == OMP_EVENT_DEV_RECORD || record_method == OMP_EVENT_HOST_DEV_RECORD) {
ev->elapsed_dev = omp_event_elapsed_ms_dev(ev);
rt = ev->elapsed_dev;
}
if (record_method == OMP_EVENT_HOST_RECORD || record_method == OMP_EVENT_HOST_DEV_RECORD) {
ev->elapsed_host = omp_event_elapsed_ms_host(ev);
rt = ev->elapsed_host;
}
ev->recorded = 0;
return rt;
}
double omp_event_accumulate_elapsed_ms(omp_event_t *ev, double offset) {
double rt = 0.0;
if (!ev->recorded) return rt;
omp_event_record_method_t record_method = ev->record_method;
omp_device_type_t devtype = ev->dev->type;
if (record_method == OMP_EVENT_DEV_RECORD || record_method == OMP_EVENT_HOST_DEV_RECORD) {
rt = omp_event_elapsed_ms_dev(ev);
ev->elapsed_dev += rt + offset;
}
if (record_method == OMP_EVENT_HOST_RECORD || record_method == OMP_EVENT_HOST_DEV_RECORD) {
rt = omp_event_elapsed_ms_host(ev);
ev->elapsed_host += rt + offset;
}
ev->count++;
ev->recorded = 0;
return rt;
}
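/*
 * A minimal usage sketch of the event API above (hypothetical caller; assumes a valid
 * omp_device_t *dev and a compatible omp_dev_stream_t *stream):
 *
 *   omp_event_t ev;
 *   omp_event_init(&ev, dev, OMP_EVENT_HOST_DEV_RECORD, stream, "memcpy", "H2D copy of %d bytes", size);
 *   omp_event_record_start(&ev);
 *   ... enqueue work on the stream ...
 *   omp_event_record_stop(&ev);
 *   omp_stream_sync(stream);  // device timestamps are only final after the stream drains
 *   double ms = omp_event_elapsed_ms(&ev);
 */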
int omp_get_max_threads_per_team(omp_device_t *dev) {
omp_device_type_t devtype = dev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
return ((struct cudaDeviceProp*)dev->dev_properties)->maxThreadsPerBlock;
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
return dev->num_cores;
} else if (devtype == OMP_DEVICE_ITLMIC) {
return dev->num_cores;
} else {
}
return 0;
}
int omp_get_optimal_threads_per_team(omp_device_t *dev) {
int max = omp_get_max_threads_per_team(dev);
if (max == 1) return 1;
else return max / 2;
}
/**
* so far we only do 1D, the first dimension
*/
int omp_get_max_teams_per_league(omp_device_t *dev) {
omp_device_type_t devtype = dev->type;
if (devtype == OMP_DEVICE_NVGPU) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
return 2048; // ((struct cudaDeviceProp*)dev->dev_properties)->maxGridSize[0]/2;
#endif
} else if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
return 1;
} else if (devtype == OMP_DEVICE_ITLMIC) {
return 1;
} else {
}
return 0;
}
int omp_get_optimal_teams_per_league(omp_device_t *dev, int threads_per_team, int total) {
int teams_per_league = (total + threads_per_team - 1) / threads_per_team;
int max_teams_per_league = 2048; // omp_get_max_teams_per_league(dev);
//printf("dev: %d max teams: %d\n", dev->id, max_teams_per_league);
if (teams_per_league > max_teams_per_league) return max_teams_per_league;
else return teams_per_league;
}
|
LRBreakup.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_LRBREAKUP_H
#define QMCPLUSPLUS_LRBREAKUP_H
#include "Configuration.h"
#include "Particle/ParticleSet.h"
#include "LongRange/KContainer.h"
#include "Numerics/OhmmsBlas.h"
#include <cassert>
namespace qmcplusplus
{
template<class BreakupBasis>
struct LRBreakup
{
DECLARE_COULOMB_TYPES
//Typedef for the lattice-type. We don't need the full particle-set.
typedef ParticleSet::ParticleLayout_t ParticleLayout_t;
//We use an internal k-list with degeneracies to do the breakup.
//We do this because the number of vectors is much larger than we'd
//use elsewhere.
void AddKToList(mRealType k, mRealType degeneracy = 1.0);
///The basis to be used for breakup.
BreakupBasis& Basis;
/// For each k, KList[k][0] = |k| and KList[k][1] = degeneracy
std::vector<TinyVector<mRealType, 2>> KList;
/** setup KList
* @param kc k-space cutoff for long-range sums
* @param kcont k at which approximate (spherical shell) degeneracies are used.
* @param kmax largest k used for performing the breakup
* @return the maximum kshell for the given kc
*/
int SetupKVecs(mRealType kc, mRealType kcont, mRealType kmax);
//Fk is FT of F_full(r) up to kmax
//adjust is used for constraining values in the breakup
/* REPLACED SO WE CAN USE TYPES OTHER THAN STL VECTOR.
mRealType DoBreakup(const std::vector<mRealType> &Fk, std::vector<mRealType> &t,
const std::vector<bool> &adjust);
mRealType DoBreakup(const std::vector<mRealType> &Fk, std::vector<mRealType> &t);
*/
mRealType DoBreakup(mRealType* Fk, mRealType* t, mRealType* adjust);
mRealType DoGradBreakup(mRealType* Fk, mRealType* t, mRealType* adjust);
mRealType DoStrainBreakup(mRealType* Fk, mRealType* dFk, mRealType* t, mRealType* adjust);
void DoAllBreakup(mRealType* chisqr,
mRealType* Fk,
mRealType* dFk,
mRealType* t,
mRealType* gt,
mRealType* dt,
mRealType* adjust);
mRealType DoBreakup(mRealType* Fk, mRealType* t)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> cnk;
int numElem = Basis.NumBasisElem(); //t.size();
A.resize(numElem, numElem);
b.resize(numElem, 0.0);
cnk.resize(numElem, KList.size());
// Fill in cnk.
// app_log() << "Check OMP size : numElem, KList.size : " << numElem << " , " << KList.size() << std::endl;
#pragma omp parallel for shared(cnk)
for (int n = 0; n < numElem; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
}
}
// Now, fill in A and b
A = 0.0;
for (int l = 0; l < numElem; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
b[l] += KList[ki][1] * Fk[ki] * cnk(l, ki);
for (int n = 0; n < numElem; n++)
A(l, n) += KList[ki][1] * cnk(l, ki) * cnk(n, ki);
}
}
//////////////////////////
//Do the SVD:
// Matrix<mRealType> U(numElem, numElem), V(numElem, numElem);
// std::vector<mRealType> S(numElem), Sinv(numElem);
//////////////////////////
// SVdecomp(A, U, S, V);
//////////////////////////
int M = A.rows();
int N = A.cols();
Matrix<mRealType> Atrans(N, M);
Matrix<mRealType> U, V;
U.resize(std::min(M, N), M);
V.resize(N, std::min(M, N));
std::vector<mRealType> S, Sinv;
S.resize(std::min(N, M));
//Do the transpose
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
Atrans(j, i) = A(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = M;
int LDU = M;
int LDVT = std::min(M, N);
int LWORK = 10 * std::max(3 * std::min(N, M) + std::max(M, N), 5 * std::min(M, N));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &M, &N, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
///////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < S.size(); i++)
Smax = std::max(S[i], Smax);
Sinv.resize(S.size());
for (int i = 0; i < S.size(); i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
for (int i = 0; i < numElem; i++)
t[i] = 0.0;
// Compute t_n, removing singular values
for (int i = 0; i < numElem; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < numElem; j++)
coef += U(j, i) * b[j];
coef *= Sinv[i];
for (int k = 0; k < numElem; k++)
t[k] += coef * V(k, i);
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = Fk[ki];
for (int n = 0; n < numElem; n++)
{
Yk -= cnk(n, ki) * t[n];
}
chi2 += KList[ki][1] * Yk * Yk;
}
return (chi2);
}
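/* Algebra implemented by DoBreakup above: it minimizes
 *   chi^2 = sum_k d_k * (F_k - sum_n c_nk t_n)^2
 * over the coefficients t_n, where d_k = KList[k][1] is the degeneracy of |k|.
 * Setting the gradient to zero yields the normal equations A t = b with
 *   A(l,n) = sum_k d_k c_lk c_nk  and  b_l = sum_k d_k F_k c_lk,
 * solved via the SVD pseudo-inverse t = V S^+ U^T b, where singular values
 * below epsilon * S_max are zeroed for numerical stability.
 */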
//The constructor. Call the constructor of basis...
//set up the basis parameters too.
LRBreakup(BreakupBasis& bref) : Basis(bref)
{ /*Do Nothing*/
}
mRealType DoGradBreakup(mRealType* Fk, mRealType* t)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> cnk;
int numElem = Basis.NumBasisElem(); //t.size();
A.resize(numElem, numElem);
b.resize(numElem, 0.0);
cnk.resize(numElem, KList.size());
// Fill in cnk.
for (int n = 0; n < numElem; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
}
}
// Now, fill in A and b
A = 0.0;
for (int l = 0; l < numElem; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
b[l] += k2 * KList[ki][1] * Fk[ki] * cnk(l, ki);
for (int n = 0; n < numElem; n++)
A(l, n) += k2 * KList[ki][1] * cnk(l, ki) * cnk(n, ki);
}
}
//////////////////////////
//Do the SVD:
// Matrix<mRealType> U(numElem, numElem), V(numElem, numElem);
// std::vector<mRealType> S(numElem), Sinv(numElem);
//////////////////////////
// SVdecomp(A, U, S, V);
//////////////////////////
int M = A.rows();
int N = A.cols();
Matrix<mRealType> Atrans(N, M);
Matrix<mRealType> U, V;
U.resize(std::min(M, N), M);
V.resize(N, std::min(M, N));
std::vector<mRealType> S, Sinv;
S.resize(std::min(N, M));
//Do the transpose
for (int i = 0; i < M; i++)
{
for (int j = 0; j < N; j++)
Atrans(j, i) = A(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = M;
int LDU = M;
int LDVT = std::min(M, N);
int LWORK = 10 * std::max(3 * std::min(N, M) + std::max(M, N), 5 * std::min(M, N));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &M, &N, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
///////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < S.size(); i++)
Smax = std::max(S[i], Smax);
Sinv.resize(S.size());
for (int i = 0; i < S.size(); i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
for (int i = 0; i < numElem; i++)
t[i] = 0.0;
// Compute t_n, removing singular values
for (int i = 0; i < numElem; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < numElem; j++)
coef += U(j, i) * b[j];
coef *= Sinv[i];
for (int k = 0; k < numElem; k++)
t[k] += coef * V(k, i);
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
Yk = Fk[ki];
for (int n = 0; n < numElem; n++)
{
Yk -= cnk(n, ki) * t[n];
}
chi2 += k2 * KList[ki][1] * Yk * Yk;
}
return (chi2);
}
};
template<class BreakupBasis>
void LRBreakup<BreakupBasis>::AddKToList(mRealType k, mRealType degeneracy /* =1.0 */)
{
//Search for this k already in list
int ki = 0;
while ((ki < KList.size()) && (std::abs(k - KList[ki][0]) > 1.0e-12))
ki++;
if (ki == KList.size())
{
TinyVector<mRealType, 2> temp(k, degeneracy);
KList.push_back(temp);
}
else
KList[ki][1] += degeneracy;
}
template<class BreakupBasis>
int LRBreakup<BreakupBasis>::SetupKVecs(mRealType kc, mRealType kcont, mRealType kmax)
{
//Add low |k| ( < kcont) k-points with exact degeneracy
KContainer kexact;
kexact.UpdateKLists(Basis.get_Lattice(), kcont);
bool findK = true;
mRealType kc2 = kc * kc;
//use at least one shell
size_t ks = 0;
kc2 = std::max(kc2, static_cast<mRealType>(kexact.ksq[kexact.kshell[ks]]));
while (findK)
{
if (kexact.ksq[kexact.kshell[ks]] > kc2)
findK = false;
else
ks++;
}
size_t maxkshell = ks;
size_t numk = kexact.numk - kexact.kshell[ks];
for (; ks < kexact.kshell.size() - 1; ks++)
AddKToList(std::sqrt(kexact.ksq[kexact.kshell[ks]]), kexact.kshell[ks + 1] - kexact.kshell[ks]);
////Add these vectors to the internal list
//int numk=0;
//mRealType modk2;
//for(int ki=0; ki<kexact.numk; ki++) {
// modk2 = dot(kexact.kpts_cart[ki],kexact.kpts_cart[ki]);
// if(modk2 > (kc*kc)) { //Breakup needs kc < k < kcont.
// AddKToList(std::sqrt(modk2));
// numk++;
// }
//}
//Add high |k| ( >kcont, <kmax) k-points with approximate degeneracy
//Volume of one k-point is (2*pi)^3 / (a1 . (a2 x a3)) = (2*pi)^3 / CellVolume
#if OHMMS_DIM == 3
mRealType kelemvol = 8 * M_PI * M_PI * M_PI / Basis.get_CellVolume();
//Generate 4000 shells:
const int N = 4000;
mRealType deltak = (kmax - kcont) / N;
for (int i = 0; i < N; i++)
{
mRealType k1 = kcont + deltak * i;
mRealType k2 = k1 + deltak;
mRealType kmid = 0.5 * (k1 + k2);
mRealType shellvol = 4.0 * M_PI * (k2 * k2 * k2 - k1 * k1 * k1) / 3.0;
mRealType degeneracy = shellvol / kelemvol;
AddKToList(kmid, degeneracy);
numk += static_cast<int>(degeneracy);
}
#elif OHMMS_DIM == 2
mRealType kelemvol = 4 * M_PI * M_PI / Basis.get_CellVolume();
//Generate 8000 shells:
const int N = 8000;
mRealType deltak = (kmax - kcont) / N;
for (int i = 0; i < N; i++)
{
mRealType k1 = kcont + deltak * i;
mRealType k2 = k1 + deltak;
mRealType kmid = 0.5 * (k1 + k2);
mRealType shellvol = M_PI * (k2 * k2 - k1 * k1);
mRealType degeneracy = shellvol / kelemvol;
AddKToList(kmid, degeneracy);
numk += static_cast<int>(degeneracy);
}
#endif
app_log() << " NUMBER OF OPT_BREAK KVECS = " << numk << std::endl;
return maxkshell;
//numk now contains the total number of vectors.
//this->klist.size() contains the number of unique vectors.
}
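/* Degeneracy model used above: one k-point occupies a reciprocal-space volume of
 * (2*pi)^d / CellVolume, so a thin shell [k1, k2] holds approximately
 * shellvol / kelemvol points; in 3D,
 *   degeneracy ~ (4*pi/3) * (k2^3 - k1^3) / ((2*pi)^3 / CellVolume).
 */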
//Do the constrained breakup
template<class BreakupBasis>
typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoBreakup(mRealType* Fk,
mRealType* t,
mRealType* adjust)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t and adjust must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==adjust.size());
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> cnk;
int N = Basis.NumBasisElem(); //t.size();
A.resize(N, N);
b.resize(N, 0.0);
cnk.resize(N, KList.size());
//Fill in cnk.
for (int n = 0; n < N; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
}
}
//Fill in A and b
A = 0.0;
for (int l = 0; l < N; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
b[l] += KList[ki][1] * Fk[ki] * cnk(l, ki);
for (int n = 0; n < N; n++)
A(l, n) += KList[ki][1] * cnk(l, ki) * cnk(n, ki);
}
}
//Reduce for constraints
int M = N;
for (int i = 0; i < N; i++)
if (!adjust[i])
M--;
//The c is for "constrained"
Matrix<mRealType> Ac;
Ac.resize(M, M);
std::vector<mRealType> bc(M, 0.0), tc(M, 0.0);
//Build constrained Ac and bc
int j = 0;
for (int col = 0; col < N; col++)
{
if (adjust[col])
{
// Copy column of A to Ac
int i = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
Ac(i, j) = A(row, col);
i++;
}
j++;
}
else
{
// Otherwise, subtract t(col)*A(:,col) from bc
for (int row = 0; row < N; row++)
b[row] -= A(row, col) * t[col];
}
}
j = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
bc[j] = b[row];
j++;
}
// Do SVD:
// -------
// Matrix<mRealType> U(M, M), V(M, M);
// std::vector<mRealType> S(M), Sinv(M);
// SVdecomp(Ac, U, S, V);
////////////////////////////////
int m = Ac.rows();
int n = Ac.cols();
Matrix<mRealType> Atrans(n, m);
Matrix<mRealType> U, V;
U.resize(std::min(m, n), m);
V.resize(n, std::min(m, n));
std::vector<mRealType> S, Sinv;
S.resize(std::min(n, m));
//do the transpose
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
Atrans(j, i) = Ac(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = m;
int LDU = m;
int LDVT = std::min(m, n);
int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
//////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < M; i++)
Smax = std::max(S[i], Smax);
for (int i = 0; i < M; i++)
if (S[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sinv.resize(S.size());
for (int i = 0; i < M; i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
// Compute t_n, removing singular values
for (int i = 0; i < M; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < M; j++)
coef += U(j, i) * bc[j];
coef *= Sinv[i];
for (int k = 0; k < M; k++)
tc[k] += coef * V(k, i);
}
// Now copy tc values into t
j = 0;
for (int i = 0; i < N; i++)
if (adjust[i])
{
t[i] = tc[j];
j++;
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = Fk[ki];
for (int n = 0; n < N; n++)
{
Yk -= cnk(n, ki) * t[n];
}
chi2 += KList[ki][1] * Yk * Yk;
}
return (chi2);
}
template<class BreakupBasis>
typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoGradBreakup(mRealType* Fk,
mRealType* t,
mRealType* adjust)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t and adjust must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==adjust.size());
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> cnk;
int N = Basis.NumBasisElem(); //t.size();
A.resize(N, N);
b.resize(N, 0.0);
cnk.resize(N, KList.size());
//Fill in cnk.
for (int n = 0; n < N; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
}
}
//Fill in A and b
A = 0.0;
for (int l = 0; l < N; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
b[l] += k2 * KList[ki][1] * Fk[ki] * cnk(l, ki);
for (int n = 0; n < N; n++)
A(l, n) += k2 * KList[ki][1] * cnk(l, ki) * cnk(n, ki);
}
}
//Reduce for constraints
int M = N;
for (int i = 0; i < N; i++)
if (!adjust[i])
M--;
//The c is for "constrained"
Matrix<mRealType> Ac;
Ac.resize(M, M);
std::vector<mRealType> bc(M, 0.0), tc(M, 0.0);
//Build constrained Ac and bc
int j = 0;
for (int col = 0; col < N; col++)
{
if (adjust[col])
{
// Copy column of A to Ac
int i = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
Ac(i, j) = A(row, col);
i++;
}
j++;
}
else
{
// Otherwise, subtract t(col)*A(:,col) from bc
for (int row = 0; row < N; row++)
b[row] -= A(row, col) * t[col];
}
}
j = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
bc[j] = b[row];
j++;
}
// Do SVD:
// -------
// Matrix<mRealType> U(M, M), V(M, M);
// std::vector<mRealType> S(M), Sinv(M);
// SVdecomp(Ac, U, S, V);
////////////////////////////////
int m = Ac.rows();
int n = Ac.cols();
Matrix<mRealType> Atrans(n, m);
Matrix<mRealType> U, V;
U.resize(std::min(m, n), m);
V.resize(n, std::min(m, n));
std::vector<mRealType> S, Sinv;
S.resize(std::min(n, m));
//do the transpose
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
Atrans(j, i) = Ac(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = m;
int LDU = m;
int LDVT = std::min(m, n);
int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
//////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < M; i++)
Smax = std::max(S[i], Smax);
for (int i = 0; i < M; i++)
if (S[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sinv.resize(S.size());
for (int i = 0; i < M; i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
// Compute t_n, removing singular values
for (int i = 0; i < M; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < M; j++)
coef += U(j, i) * bc[j];
coef *= Sinv[i];
for (int k = 0; k < M; k++)
tc[k] += coef * V(k, i);
}
// Now copy tc values into t
j = 0;
for (int i = 0; i < N; i++)
if (adjust[i])
{
t[i] = tc[j];
j++;
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = Fk[ki];
for (int n = 0; n < N; n++)
{
Yk -= cnk(n, ki) * t[n];
}
chi2 += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk * Yk;
}
return (chi2);
}
template<class BreakupBasis>
typename LRBreakup<BreakupBasis>::mRealType LRBreakup<BreakupBasis>::DoStrainBreakup(mRealType* Fk,
mRealType* dFk,
mRealType* t,
mRealType* adjust)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t and adjust must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==adjust.size());
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
std::vector<mRealType> b;
Matrix<mRealType> dcnk;
int N = Basis.NumBasisElem(); //t.size();
A.resize(N, N);
b.resize(N, 0.0);
dcnk.resize(N, KList.size());
//Fill in cnk.
for (int n = 0; n < N; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
dcnk(n, ki) = Basis.dc_dk(n, k); //-Basis.c(n,k);
}
}
//Fill in A and b
A = 0.0;
for (int l = 0; l < N; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
// b[l] += k2*KList[ki][1]*(dFk[ki]-Fk[ki]) * dcnk(l, ki);
b[l] += k2 * KList[ki][1] * (dFk[ki]) * dcnk(l, ki);
for (int n = 0; n < N; n++)
A(l, n) += k2 * KList[ki][1] * dcnk(l, ki) * dcnk(n, ki);
}
}
//Reduce for constraints
int M = N;
for (int i = 0; i < N; i++)
if (!adjust[i])
M--;
//The c is for "constrained"
Matrix<mRealType> Ac;
Ac.resize(M, M);
std::vector<mRealType> bc(M, 0.0), tc(M, 0.0);
//Build constrained Ac and bc
int j = 0;
for (int col = 0; col < N; col++)
{
if (adjust[col])
{
// Copy column of A to Ac
int i = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
Ac(i, j) = A(row, col);
i++;
}
j++;
}
else
{
// Otherwise, subtract t(col)*A(:,col) from bc
for (int row = 0; row < N; row++)
b[row] -= A(row, col) * t[col];
}
}
j = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
bc[j] = b[row];
j++;
}
// Do SVD:
// -------
// Matrix<mRealType> U(M, M), V(M, M);
// std::vector<mRealType> S(M), Sinv(M);
// SVdecomp(Ac, U, S, V);
////////////////////////////////
int m = Ac.rows();
int n = Ac.cols();
Matrix<mRealType> Atrans(n, m);
Matrix<mRealType> U, V;
U.resize(std::min(m, n), m);
V.resize(n, std::min(m, n));
std::vector<mRealType> S, Sinv;
S.resize(std::min(n, m));
//do the transpose
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
Atrans(j, i) = Ac(i, j);
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = m;
int LDU = m;
int LDVT = std::min(m, n);
int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Atrans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> Utrans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
Utrans(j, i) = U(i, j);
}
U.resize(uc, ur);
U = Utrans;
//////////////////////////////////
// Zero out near-singular values
mRealType Smax = S[0];
for (int i = 1; i < M; i++)
Smax = std::max(S[i], Smax);
for (int i = 0; i < M; i++)
if (S[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sinv.resize(S.size());
for (int i = 0; i < M; i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in breakup.\n";
// Compute t_n, removing singular values
for (int i = 0; i < M; i++)
{
mRealType coef = 0.0;
for (int j = 0; j < M; j++)
coef += U(j, i) * bc[j];
coef *= Sinv[i];
for (int k = 0; k < M; k++)
tc[k] += coef * V(k, i);
}
// Now copy tc values into t
j = 0;
for (int i = 0; i < N; i++)
if (adjust[i])
{
t[i] = tc[j];
j++;
}
// Calculate chi-squared
mRealType Yk, chi2;
chi2 = 0.0;
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = dFk[ki]; //-Fk[ki];
for (int n = 0; n < N; n++)
{
Yk -= dcnk(n, ki) * t[n];
}
chi2 += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk * Yk;
}
return (chi2);
}
template<class BreakupBasis>
void LRBreakup<BreakupBasis>::DoAllBreakup(mRealType* chisqrlist,
mRealType* Fk,
mRealType* dFk,
mRealType* t,
mRealType* gt,
mRealType* dt,
mRealType* adjust)
{
const mRealType tolerance = std::numeric_limits<mRealType>::epsilon();
//t and adjust must be allocated up to Basis.NumBasisElem();
//Fk must be allocated and filled up to KList.size();
// assert(t.size()==adjust.size());
// assert(t.size()==Basis.NumBasisElem());
Matrix<mRealType> A;
Matrix<mRealType> Af;
Matrix<mRealType> As;
std::vector<mRealType> b;
std::vector<mRealType> bf;
std::vector<mRealType> bs;
Matrix<mRealType> cnk;
Matrix<mRealType> dcnk;
int N = Basis.NumBasisElem(); //t.size();
A.resize(N, N);
Af.resize(N, N);
As.resize(N, N);
b.resize(N, 0.0);
bf.resize(N, 0.0);
bs.resize(N, 0.0);
cnk.resize(N, KList.size());
dcnk.resize(N, KList.size());
//Fill in cnk.
for (int n = 0; n < N; n++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k = KList[ki][0];
cnk(n, ki) = Basis.c(n, k);
dcnk(n, ki) = Basis.dc_dk(n, k); //-Basis.c(n,k);
}
}
//Fill in A and b
A = 0.0;
Af = 0.0;
As = 0.0;
for (int l = 0; l < N; l++)
{
for (int ki = 0; ki < KList.size(); ki++)
{
mRealType k2 = KList[ki][0] * KList[ki][0];
mRealType temp = KList[ki][1] * Fk[ki] * cnk(l, ki);
// b[l] += k2*KList[ki][1]*(dFk[ki]-Fk[ki]) * dcnk(l, ki);
b[l] += temp;
bf[l] += k2 * temp;
bs[l] += k2 * KList[ki][1] * dFk[ki] * dcnk(l, ki);
for (int n = 0; n < N; n++)
{
temp = KList[ki][1] * cnk(l, ki) * cnk(n, ki);
A(l, n) += temp;
Af(l, n) += k2 * temp;
As(l, n) += k2 * KList[ki][1] * dcnk(l, ki) * dcnk(n, ki);
}
}
}
//************************************
//FOR POTENTIAL AND FORCE
//************************************
//Reduce for constraints
int M = N;
for (int i = 0; i < N; i++)
if (!adjust[i])
M--;
//The c is for "constrained"
Matrix<mRealType> Ac;
Matrix<mRealType> Afc;
Matrix<mRealType> Asc;
Ac.resize(M, M);
Afc.resize(M, M);
Asc.resize(M, M);
std::vector<mRealType> bc(M, 0.0), bfc(M, 0.0), bsc(M, 0.0), tc(M, 0.0), tfc(M, 0.0), tsc(M, 0.0);
//Build constrained Ac and bc
int j = 0;
for (int col = 0; col < N; col++)
{
if (adjust[col])
{
// Copy column of A to Ac
int i = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
Ac(i, j) = A(row, col);
Afc(i, j) = Af(row, col);
Asc(i, j) = As(row, col);
i++;
}
j++;
}
else
{
// Otherwise, subtract t(col)*A(:,col) from bc
for (int row = 0; row < N; row++)
{
b[row] -= A(row, col) * t[col];
bf[row] -= Af(row, col) * gt[col];
bs[row] -= As(row, col) * dt[col];
}
}
}
j = 0;
for (int row = 0; row < N; row++)
if (adjust[row])
{
bc[j] = b[row];
bfc[j] = bf[row];
bsc[j] = bs[row];
j++;
}
// Do SVD:
// -------
// Matrix<mRealType> U(M, M), V(M, M);
// std::vector<mRealType> S(M), Sinv(M);
// SVdecomp(Ac, U, S, V);
////////////////////////////////
int m = Ac.rows();
int n = Ac.cols();
Matrix<mRealType> A_trans(n, m);
Matrix<mRealType> Af_trans(n, m);
Matrix<mRealType> As_trans(n, m);
Matrix<mRealType> U, V;
Matrix<mRealType> Uf, Vf;
Matrix<mRealType> Us, Vs;
U.resize(std::min(m, n), m);
V.resize(n, std::min(m, n));
Uf.resize(std::min(m, n), m);
Vf.resize(n, std::min(m, n));
Us.resize(std::min(m, n), m);
Vs.resize(n, std::min(m, n));
std::vector<mRealType> S, Sinv;
S.resize(std::min(n, m));
std::vector<mRealType> Sf, Sfinv;
Sf.resize(std::min(n, m));
std::vector<mRealType> Ss, Ssinv;
Ss.resize(std::min(n, m));
//do the transpose
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
{
A_trans(j, i) = Ac(i, j);
Af_trans(j, i) = Afc(i, j);
As_trans(j, i) = Asc(i, j);
}
}
char JOBU = 'S';
char JOBVT = 'S';
int LDA = m;
int LDU = m;
int LDVT = std::min(m, n);
int LWORK = 10 * std::max(3 * std::min(n, m) + std::max(m, n), 5 * std::min(m, n));
std::vector<mRealType> WORK(LWORK);
int INFO;
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, A_trans.data(), &LDA, &S[0], U.data(), &LDU, V.data(), &LDVT, &WORK[0], &LWORK,
&INFO);
assert(INFO == 0);
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, Af_trans.data(), &LDA, &Sf[0], Uf.data(), &LDU, Vf.data(), &LDVT, &WORK[0],
&LWORK, &INFO);
assert(INFO == 0);
LAPACK::gesvd(&JOBU, &JOBVT, &m, &n, As_trans.data(), &LDA, &Ss[0], Us.data(), &LDU, Vs.data(), &LDVT, &WORK[0],
&LWORK, &INFO);
assert(INFO == 0);
int ur = U.rows();
int uc = U.cols();
Matrix<mRealType> U_trans(uc, ur);
Matrix<mRealType> Uf_trans(uc, ur);
Matrix<mRealType> Us_trans(uc, ur);
for (int i = 0; i < ur; i++)
{
for (int j = 0; j < uc; j++)
{
U_trans(j, i) = U(i, j);
Uf_trans(j, i) = Uf(i, j);
Us_trans(j, i) = Us(i, j);
}
}
U.resize(uc, ur);
U = U_trans;
Uf.resize(uc, ur);
Uf = Uf_trans;
Us.resize(uc, ur);
Us = Us_trans;
//////////////////////////////////
// Zero out near-singular values
//First, do normal breakup.
mRealType Smax = S[0];
for (int i = 1; i < M; i++)
Smax = std::max(S[i], Smax);
for (int i = 0; i < M; i++)
if (S[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sinv.resize(S.size());
for (int i = 0; i < M; i++)
Sinv[i] = (S[i] < (tolerance * Smax)) ? 0.0 : (1.0 / S[i]);
int numSingular = 0;
for (int i = 0; i < Sinv.size(); i++)
if (Sinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in energy breakup.\n";
//Second, do force.
Smax = Sf[0];
for (int i = 1; i < M; i++)
Smax = std::max(Sf[i], Smax);
for (int i = 0; i < M; i++)
if (Sf[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Sfinv.resize(Sf.size());
for (int i = 0; i < M; i++)
Sfinv[i] = (Sf[i] < (tolerance * Smax)) ? 0.0 : (1.0 / Sf[i]);
numSingular = 0;
for (int i = 0; i < Sfinv.size(); i++)
if (Sfinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in force breakup.\n";
//Third, do strain.
Smax = Ss[0];
for (int i = 1; i < M; i++)
Smax = std::max(Ss[i], Smax);
for (int i = 0; i < M; i++)
if (Ss[i] < 0.0)
std::cout << "negative singlar value.\n";
// perr << "Smax = " << Smax << std::endl;
Ssinv.resize(Ss.size());
for (int i = 0; i < M; i++)
Ssinv[i] = (Ss[i] < (tolerance * Smax)) ? 0.0 : (1.0 / Ss[i]);
numSingular = 0;
for (int i = 0; i < Ssinv.size(); i++)
if (Ssinv[i] == 0.0)
numSingular++;
if (numSingular > 0)
std::cout << "There were " << numSingular << " singular values in strain breakup.\n";
// Compute t_n, removing singular values
for (int i = 0; i < M; i++)
{
mRealType coef = 0.0;
mRealType coef_f = 0.0;
mRealType coef_s = 0.0;
for (int j = 0; j < M; j++)
{
coef += U(j, i) * bc[j];
coef_f += Uf(j, i) * bfc[j];
coef_s += Us(j, i) * bsc[j];
}
coef *= Sinv[i];
coef_f *= Sfinv[i];
coef_s *= Ssinv[i];
for (int k = 0; k < M; k++)
{
tc[k] += coef * V(k, i);
tfc[k] += coef_f * Vf(k, i);
tsc[k] += coef_s * Vs(k, i);
}
}
// Now copy tc values into t
j = 0;
for (int i = 0; i < N; i++)
if (adjust[i])
{
t[i] = tc[j];
gt[i] = tfc[j];
dt[i] = tsc[j];
j++;
}
// Calculate chi-squared
mRealType Yk(0.0), chi2(0.0);
mRealType Yk_f(0.0), chi2_f(0.0);
mRealType Yk_s(0.0), chi2_s(0.0);
for (int ki = 0; ki < KList.size(); ki++)
{
Yk = Fk[ki]; //-Fk[ki];
Yk_f = Fk[ki];
Yk_s = dFk[ki];
for (int n = 0; n < N; n++)
{
Yk -= cnk(n, ki) * t[n];
Yk_f -= cnk(n, ki) * gt[n];
Yk_s -= dcnk(n, ki) * dt[n];
}
chi2 += KList[ki][1] * Yk * Yk;
chi2_f += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk_f * Yk_f;
chi2_s += KList[ki][0] * KList[ki][0] * KList[ki][1] * Yk_s * Yk_s;
}
// std::vector<mRealType> chisqrtmp(3);
chisqrlist[0] = chi2;
chisqrlist[1] = chi2_f;
chisqrlist[2] = chi2_s;
//chisqrlist=chisqrtmp;
}
} // namespace qmcplusplus
#endif
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
void gemm_bin(int M, int N, int K, float ALPHA,
char *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
char A_PART = A[i*lda+k];
if(A_PART){
for(j = 0; j < N; ++j){
C[i*ldc+j] += B[k*ldb+j];
}
} else {
for(j = 0; j < N; ++j){
C[i*ldc+j] -= B[k*ldb+j];
}
}
}
}
}
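/* gemm_bin treats A as a matrix of binary (+1/-1) weights stored one per char:
 * a nonzero entry adds the corresponding row of B into C, a zero entry subtracts it,
 * replacing the multiply of a full GEMM with signed accumulation. */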
float *random_matrix(int rows, int cols)
{
int i;
float *m = calloc(rows*cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<10; ++i){
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for private(j,k) /* j and k are declared outside the loop and must be made private to avoid a data race */
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[i*lda+k];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_nt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for private(j,k)
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
register float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_tn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for private(j,k)
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[k*lda+i];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_tt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for private(j,k)
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
register float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
if(!TA && !TB)
gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else if(TA && !TB)
gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else if(!TA && TB)
gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else
gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}
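/*
 * Minimal usage sketch (hypothetical data): C = 1*A*B + 0*C for row-major
 * A (2x3) and B (3x2) with no transposition, so lda = K, ldb = N, ldc = N:
 *
 *   float A[6] = {1,2,3,4,5,6};
 *   float B[6] = {1,0, 0,1, 1,1};
 *   float C[4] = {0};
 *   gemm(0, 0, 2, 2, 3, 1.0f, A, 3, B, 2, 0.0f, C, 2);
 */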
#ifdef GPU
#include <math.h>
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
cublasStatus_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
check_error((cudaError_t)status); /* cublasSgemm returns cublasStatus_t, not cudaError_t; both use 0 for success */
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void time_gpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
cudaDeviceSynchronize(); /* cudaThreadSynchronize is deprecated */
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *c_gpu = random_matrix(m,n);
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
free(a);
free(b);
free(c);
free(c_gpu);
}
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,192,729,1600);
time_gpu(0,0,384,196,1728);
time_gpu(0,0,256,196,3456);
time_gpu(0,0,256,196,2304);
time_gpu(0,0,128,4096,12544);
time_gpu(0,0,128,4096,4096);
*/
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,576,12544);
time_gpu(0,0,256,2304,784);
time_gpu(1,1,2304,256,784);
time_gpu(0,0,512,4608,196);
time_gpu(1,1,4608,512,196);
return 0;
}
#endif
|
initmatrix.c |
/*----------------------------------------------------------------*/
#include <fftw3.h>
#include "initmatrix.h"
/*----------------------------------------------------------------*/
int hclFillSignal2D(
const int m,
const int n,
const unsigned int nt,
fftw_complex* signal
)
{
int p, q;
#pragma omp parallel for shared(signal) private(p, q) num_threads(nt)
for (p = 0; p < m; p++)
{
for (q = 0; q < n; q++)
{
signal[p*n+q][0] = p*n+q+1;
signal[p*n+q][1] = p*n+q+1;
}
}
return 0;
}
/*----------------------------------------------------------------*/
int hclPrintSignal2D(
const int m, const int n,
fftw_complex* signal
)
{
int p, q;
for (p = 0; p < m; p++)
{
for (q = 0; q < n; q++)
{
if ((q != 0) && ((q % 4) == 0))
{
printf(
"[%0.2lf + i * %0.2lf]\n",
signal[p*n+q][0],
signal[p*n+q][1]
);
continue;
}
printf(
"[%0.2lf + i * %0.2lf]\t",
signal[p*n+q][0],
signal[p*n+q][1]
);
}
printf("\n");
}
printf("\n");
return 0;
}
/*----------------------------------------------------------------*/
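/*
 * Minimal usage sketch (hypothetical sizes):
 *
 *   int m = 4, n = 4;
 *   fftw_complex *signal = fftw_malloc(sizeof(fftw_complex) * m * n);
 *   hclFillSignal2D(m, n, 2, signal);
 *   hclPrintSignal2D(m, n, signal);
 *   fftw_free(signal);
 */
/*----------------------------------------------------------------*/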
|
core_ssymm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zsymm.c, normal z -> s, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_symm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha \times A \times B + \beta \times C \f]
* or
* \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* where alpha and beta are scalars, A is a symmetric matrix and B and
* C are m-by-n matrices.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the symmetric matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the symmetric matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* symmetric matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* symmetric matrix A is to be referenced.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft,
* and is n otherwise. Only the uplo triangular part is referenced.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,ka).
*
* @param[in] B
* B is an ldb-by-n matrix, where the leading m-by-n part of
* the array B must contain the matrix B.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* C is an ldc-by-n matrix.
* On exit, the array is overwritten by the m-by-n updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_ssymm(plasma_enum_t side, plasma_enum_t uplo,
int m, int n,
float alpha, const float *A, int lda,
const float *B, int ldb,
float beta, float *C, int ldc)
{
cblas_ssymm(CblasColMajor,
(CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
m, n,
(alpha), A, lda,
B, ldb,
(beta), C, ldc);
}
/******************************************************************************/
void plasma_core_omp_ssymm(
plasma_enum_t side, plasma_enum_t uplo,
int m, int n,
float alpha, const float *A, int lda,
const float *B, int ldb,
float beta, float *C, int ldc,
plasma_sequence_t *sequence, plasma_request_t *request)
{
int ak;
if (side == PlasmaLeft)
ak = m;
else
ak = n;
#pragma omp task depend(in:A[0:lda*ak]) \
depend(in:B[0:ldb*n]) \
depend(inout:C[0:ldc*n])
{
if (sequence->status == PlasmaSuccess)
plasma_core_ssymm(side, uplo,
m, n,
alpha, A, lda,
B, ldb,
beta, C, ldc);
}
}
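/*
 * Minimal usage sketch (hypothetical, column-major data): C = alpha*A*B + beta*C
 * with the symmetric matrix A on the left and its lower triangle referenced:
 *
 *   int m = 4, n = 3;
 *   float A[4*4], B[4*3], C[4*3];   // fill A (lower triangle), B, C first
 *   plasma_core_ssymm(PlasmaLeft, PlasmaLower, m, n,
 *                     1.0f, A, 4, B, 4, 0.0f, C, 4);
 */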
|
convolutionbnrelu_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void convbnrelu3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& a_data, const Mat& b_data)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
const float* k0 = kernel + p*inch*9;
const float* k1 = kernel + (p+1)*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr0n = outptr0 + outw;
float* outptr1n = outptr1 + outw;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k03 = vld1q_f32(k0+3);
float32x4_t _k06 = vld1q_f32(k0+6);
float32x4_t _k10 = vld1q_f32(k1);
float32x4_t _k13 = vld1q_f32(k1+3);
float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"// r0
"add %5, %5, #16 \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v14.4s, v15.4s}, [%8] \n"// r3
"add %8, %8, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n"// _sum0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n"// _sum1
"fmla v6.4s, v8.4s, %18.s[0] \n"
"fmla v7.4s, v8.4s, %21.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v12.4s}, [%3] \n"// _sum0n
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v13.4s}, [%4] \n"// _sum1n
"fmla v12.4s, v14.4s, %20.s[0] \n"
"fmla v13.4s, v14.4s, %23.s[0] \n"
"ext v8.16b, v8.16b, v9.16b, #8 \n"
"ext v9.16b, v14.16b, v15.16b, #4 \n"
"fmla v6.4s, v10.4s, %18.s[1] \n"
"fmla v7.4s, v10.4s, %21.s[1] \n"
"fmla v12.4s, v11.4s, %20.s[2] \n"
"fmla v13.4s, v11.4s, %23.s[2] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v14.4s, v15.4s}, [%6] \n"// r1
"add %6, %6, #16 \n"
"fmla v6.4s, v8.4s, %18.s[2] \n"
"fmla v7.4s, v8.4s, %21.s[2] \n"
"fmla v12.4s, v9.4s, %20.s[1] \n"
"fmla v13.4s, v9.4s, %23.s[1] \n"
"ext v10.16b, v14.16b, v15.16b, #4 \n"
"fmla v6.4s, v14.4s, %19.s[0] \n"
"fmla v7.4s, v14.4s, %22.s[0] \n"
"fmla v12.4s, v14.4s, %18.s[0] \n"
"fmla v13.4s, v14.4s, %21.s[0] \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"fmla v6.4s, v10.4s, %19.s[1] \n"
"fmla v7.4s, v10.4s, %22.s[1] \n"
"fmla v12.4s, v10.4s, %18.s[1] \n"
"fmla v13.4s, v10.4s, %21.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v8.4s, v9.4s}, [%7] \n"// r2
"add %7, %7, #16 \n"
"fmla v6.4s, v11.4s, %19.s[2] \n"
"fmla v7.4s, v11.4s, %22.s[2] \n"
"fmla v12.4s, v11.4s, %18.s[2] \n"
"fmla v13.4s, v11.4s, %21.s[2] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"fmla v6.4s, v8.4s, %20.s[0] \n"
"fmla v7.4s, v8.4s, %23.s[0] \n"
"fmla v12.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v8.4s, %22.s[0] \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %20.s[1] \n"
"fmla v7.4s, v10.4s, %23.s[1] \n"
"fmla v12.4s, v10.4s, %19.s[1] \n"
"fmla v13.4s, v10.4s, %22.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"// r0
"add %5, %5, #16 \n"
"fmla v6.4s, v11.4s, %20.s[2] \n"
"fmla v7.4s, v11.4s, %23.s[2] \n"
"fmla v12.4s, v11.4s, %19.s[2] \n"
"fmla v13.4s, v11.4s, %22.s[2] \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v14.4s, v15.4s}, [%8] \n"// r3
"add %8, %8, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"st1 {v12.4s}, [%3], #16 \n"
"st1 {v13.4s}, [%4], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %5, %5, #16 \n"
"sub %8, %8, #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr0n), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr0n),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k03), // %19
"w"(_k06), // %20
"w"(_k10), // %21
"w"(_k13), // %22
"w"(_k16) // %23
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5 :64] \n"// r0
"add %5, #16 \n"
"pld [%8, #192] \n"
"vld1.f32 {d28-d30}, [%8] \n"// r3
"add %8, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q14, q15, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1 :64] \n"// _sum0
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2 :64] \n"// _sum1
"vmla.f32 q6, q8, %e18[0] \n"
"vmla.f32 q7, q8, %e21[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d24-d25}, [%3] \n"// _sum0n
"pld [%4, #128] \n"
"vld1.f32 {d26-d27}, [%4] \n"// _sum1n
"vmla.f32 q12, q14, %e20[0] \n"
"vmla.f32 q13, q14, %e23[0] \n"
"vext.32 q8, q8, q9, #2 \n"
"vext.32 q9, q14, q15, #1 \n"
"vmla.f32 q6, q10, %e18[1] \n"
"vmla.f32 q7, q10, %e21[1] \n"
"vmla.f32 q12, q11, %f20[0] \n"
"vmla.f32 q13, q11, %f23[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d28-d30}, [%6] \n"// r1
"add %6, #16 \n"
"vmla.f32 q6, q8, %f18[0] \n"
"vmla.f32 q7, q8, %f21[0] \n"
"vmla.f32 q12, q9, %e20[1] \n"
"vmla.f32 q13, q9, %e23[1] \n"
"vext.32 q10, q14, q15, #1 \n"
"vmla.f32 q6, q14, %e19[0] \n"
"vmla.f32 q7, q14, %e22[0] \n"
"vmla.f32 q12, q14, %e18[0] \n"
"vmla.f32 q13, q14, %e21[0] \n"
"vext.32 q11, q14, q15, #2 \n"
"vmla.f32 q6, q10, %e19[1] \n"
"vmla.f32 q7, q10, %e22[1] \n"
"vmla.f32 q12, q10, %e18[1] \n"
"vmla.f32 q13, q10, %e21[1] \n"
"pld [%7, #192] \n"
"vld1.f32 {d16-d18}, [%7 :64] \n"// r2
"add %7, #16 \n"
"vmla.f32 q6, q11, %f19[0] \n"
"vmla.f32 q7, q11, %f22[0] \n"
"vmla.f32 q12, q11, %f18[0] \n"
"vmla.f32 q13, q11, %f21[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vmla.f32 q6, q8, %e20[0] \n"
"vmla.f32 q7, q8, %e23[0] \n"
"vmla.f32 q12, q8, %e19[0] \n"
"vmla.f32 q13, q8, %e22[0] \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e20[1] \n"
"vmla.f32 q7, q10, %e23[1] \n"
"vmla.f32 q12, q10, %e19[1] \n"
"vmla.f32 q13, q10, %e22[1] \n"
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5 :64] \n"// r0
"add %5, #16 \n"
"vmla.f32 q6, q11, %f20[0] \n"
"vmla.f32 q7, q11, %f23[0] \n"
"vmla.f32 q12, q11, %f19[0] \n"
"vmla.f32 q13, q11, %f22[0] \n"
"pld [%8, #192] \n"
"vld1.f32 {d28-d30}, [%8] \n"// r3
"add %8, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vst1.f32 {d12-d13}, [%1 : 64]!\n"
"vst1.f32 {d14-d15}, [%2 : 64]!\n"
"vext.32 q11, q14, q15, #2 \n"
"vst1.f32 {d24-d25}, [%3]! \n"
"vst1.f32 {d26-d27}, [%4]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %5, #16 \n"
"sub %8, #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr0n), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr0n),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k03), // %19
"w"(_k06), // %20
"w"(_k10), // %21
"w"(_k13), // %22
"w"(_k16) // %23
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
float32x4_t _sum0n = vmulq_f32(_r10, _k00);
float32x4_t _sum1n = vmulq_f32(_r10, _k10);
_sum0n = vmlaq_f32(_sum0n, _r20, _k03);
_sum1n = vmlaq_f32(_sum1n, _r20, _k13);
_sum0n = vmlaq_f32(_sum0n, _r30, _k06);
_sum1n = vmlaq_f32(_sum1n, _r30, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
_sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3);
_sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
*outptr0n = vaddvq_f32(_sum0n);
*outptr1n = vaddvq_f32(_sum1n);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n));
float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
*outptr0n = vget_lane_f32(_ss01n, 0);
*outptr1n = vget_lane_f32(_ss01n, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum0n = 0.f;
float sum1 = 0.f;
float sum1n = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
sum0n += r1[0] * k0[0];
sum0n += r1[1] * k0[1];
sum0n += r1[2] * k0[2];
sum0n += r2[0] * k0[3];
sum0n += r2[1] * k0[4];
sum0n += r2[2] * k0[5];
sum0n += r3[0] * k0[6];
sum0n += r3[1] * k0[7];
sum0n += r3[2] * k0[8];
sum1n += r1[0] * k1[0];
sum1n += r1[1] * k1[1];
sum1n += r1[2] * k1[2];
sum1n += r2[0] * k1[3];
sum1n += r2[1] * k1[4];
sum1n += r2[2] * k1[5];
sum1n += r3[0] * k1[6];
sum1n += r3[1] * k1[7];
sum1n += r3[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr0n += sum0n;
*outptr1n += sum1n;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr1++;
outptr0n++;
outptr1n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr1 += outw;
outptr0n += outw;
outptr1n += outw;
}
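// leftover single output row when outh is odd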
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n"// r0
"add %3, %3, #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n"// _sum0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n"// _sum1
"fmul v14.4s, v8.4s, %12.s[0] \n"
"fmul v15.4s, v8.4s, %15.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %12.s[1] \n"
"fmla v7.4s, v10.4s, %15.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n"// r1
"add %4, %4, #16 \n"
"fmla v14.4s, v11.4s, %12.s[2] \n"
"fmla v15.4s, v11.4s, %15.s[2] \n"
"fmla v6.4s, v8.4s, %13.s[0] \n"
"fmla v7.4s, v8.4s, %16.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v14.4s, v10.4s, %13.s[1] \n"
"fmla v15.4s, v10.4s, %16.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"// r2
"add %5, %5, #16 \n"
"fmla v6.4s, v11.4s, %13.s[2] \n"
"fmla v7.4s, v11.4s, %16.s[2] \n"
"fmla v14.4s, v8.4s, %14.s[0] \n"
"fmla v15.4s, v8.4s, %17.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %14.s[1] \n"
"fmla v7.4s, v10.4s, %17.s[1] \n"
"fmla v14.4s, v11.4s, %14.s[2] \n"
"fmla v15.4s, v11.4s, %17.s[2] \n"
"fadd v6.4s, v6.4s, v14.4s \n"
"fadd v7.4s, v7.4s, v15.4s \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n"// r0
"add %3, #16 \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1] \n"// _sum0
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2] \n"// _sum1
"vmul.f32 q14, q8, %e12[0] \n"
"vmul.f32 q15, q8, %e15[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e12[1] \n"
"vmla.f32 q7, q10, %e15[1] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n"// r1
"add %4, #16 \n"
"vmla.f32 q14, q11, %f12[0] \n"
"vmla.f32 q15, q11, %f15[0] \n"
"vmla.f32 q6, q8, %e13[0] \n"
"vmla.f32 q7, q8, %e16[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q14, q10, %e13[1] \n"
"vmla.f32 q15, q10, %e16[1] \n"
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5] \n"// r2
"add %5, #16 \n"
"vmla.f32 q6, q11, %f13[0] \n"
"vmla.f32 q7, q11, %f16[0] \n"
"vmla.f32 q14, q8, %e14[0] \n"
"vmla.f32 q15, q8, %e17[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e14[1] \n"
"vmla.f32 q7, q10, %e17[1] \n"
"vmla.f32 q14, q11, %f14[0] \n"
"vmla.f32 q15, q11, %f17[0] \n"
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q15 \n"
"vst1.f32 {d12-d13}, [%1]! \n"
"vst1.f32 {d14-d15}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
outptr0++;
outptr1++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9;
k1 += 9;
}
}
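// remaining output channels, one at a time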
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k3456 = vld1q_f32(kernel0+3);
float32x4_t _k6789 = vld1q_f32(kernel0+6);
#else
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#endif // __ARM_NEON
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v9.4s, v10.4s}, [%3] \n"// r0
"add %3, %3, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n"// _sum
"fmla v7.4s, v9.4s, %14.s[0] \n"
"fmul v6.4s, v11.4s, %14.s[1] \n"
"fmul v13.4s, v12.4s, %14.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v9.4s, v10.4s}, [%4] \n"// r1
"add %4, %4, #16 \n"
"fmla v7.4s, v9.4s, %15.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v6.4s, v11.4s, %15.s[1] \n"
"fmla v13.4s, v12.4s, %15.s[2] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.4s}, [%2] \n"// _sum2
"fmla v8.4s, v9.4s, %14.s[0] \n"
"fmul v14.4s, v11.4s, %14.s[1] \n"
"fmul v15.4s, v12.4s, %14.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v9.4s, v10.4s}, [%5] \n"// r2
"add %5, %5, #16 \n"
"fmla v7.4s, v9.4s, %16.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v6.4s, v11.4s, %16.s[1] \n"
"fmla v13.4s, v12.4s, %16.s[2] \n"
"fmla v8.4s, v9.4s, %15.s[0] \n"
"fmla v14.4s, v11.4s, %15.s[1] \n"
"fmla v15.4s, v12.4s, %15.s[2] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v9.4s, v10.4s}, [%6] \n"// r3
"add %6, %6, #16 \n"
"fmla v8.4s, v9.4s, %16.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v14.4s, v11.4s, %16.s[1] \n"
"fmla v15.4s, v12.4s, %16.s[2] \n"
"fadd v7.4s, v7.4s, v6.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v9.4s, v10.4s}, [%3] \n"// r0
"fadd v8.4s, v8.4s, v14.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"fadd v8.4s, v8.4s, v15.4s \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"add %3, %3, #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %3, %3, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k3456), // %15
"w"(_k6789) // %16
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n"// r0
"add %3, #16 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1 :64] \n"// _sum
"vmla.f32 q7, q9, %e14[0] \n"
"vmul.f32 q6, q11, %e14[1] \n"
"vmul.f32 q13, q12, %f14[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d18-d20}, [%4] \n"// r1
"add %4, #16 \n"
"vmla.f32 q7, q9, %e15[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e15[1] \n"
"vmla.f32 q13, q12, %f15[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d16-d17}, [%2] \n"// _sum2
"vmla.f32 q8, q9, %e14[0] \n"
"vmul.f32 q14, q11, %e14[1] \n"
"vmul.f32 q15, q12, %f14[0] \n"
"pld [%5, #192] \n"
"vld1.f32 {d18-d20}, [%5 :64] \n"// r2
"add %5, #16 \n"
"vmla.f32 q7, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e16[1] \n"
"vmla.f32 q13, q12, %f16[0] \n"
"vmla.f32 q8, q9, %e15[0] \n"
"vmla.f32 q14, q11, %e15[1] \n"
"vmla.f32 q15, q12, %f15[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d18-d20}, [%6] \n"// r3
"add %6, #16 \n"
"vmla.f32 q8, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q14, q11, %e16[1] \n"
"vmla.f32 q15, q12, %f16[0] \n"
"vadd.f32 q7, q7, q6 \n"
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n"// r0
"vadd.f32 q8, q8, q14 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q8, q8, q15 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"add %3, #16 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"vst1.f32 {d16-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %3, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k3456), // %15
"w"(_k6789) // %16
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
float32x4_t _sum2 = vmulq_f32(_r10, _k0123);
_sum2 = vmlaq_f32(_sum2, _r20, _k3456);
_sum2 = vmlaq_f32(_sum2, _r30, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
_sum2 = vsetq_lane_f32(*outptr2, _sum2, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
*outptr2 = vaddvq_f32(_sum2);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _sss2 = vpadd_f32(_ss, _ss2);
*outptr = vget_lane_f32(_sss2, 0);
*outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
float sum = 0.f;
float sum2 = 0.f;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"// r0
"add %2, %2, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n"// _sum
"fmla v7.4s, v8.4s, %10.s[0] \n"
"fmul v13.4s, v10.4s, %10.s[1] \n"
"fmul v14.4s, v11.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n"// r1
"add %3, %3, #16 \n"
"fmla v7.4s, v8.4s, %11.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v13.4s, v10.4s, %11.s[1] \n"
"fmla v14.4s, v11.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n"// r2
"add %4, %4, #16 \n"
"fmla v7.4s, v8.4s, %12.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v13.4s, v10.4s, %12.s[1] \n"
"fmla v14.4s, v11.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"// r0
"add %2, %2, #16 \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"st1 {v7.4s}, [%1], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n"// r0
"add %2, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n"// _sum
"vmla.f32 q7, q8, %e10[0] \n"
"vmul.f32 q13, q10, %e10[1] \n"
"vmul.f32 q14, q11, %f10[0] \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n"// r1
"add %3, #16 \n"
"vmla.f32 q7, q8, %e11[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e11[1] \n"
"vmla.f32 q14, q11, %f11[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n"// r2
"add %4, #16 \n"
"vmla.f32 q7, q8, %e12[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e12[1] \n"
"vmla.f32 q14, q11, %f12[0] \n"
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n"// r0
"add %2, #16 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q7, q7, q14 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0.f;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
////////////////// BN + ReLU ///////////////////////
// the fused batchnorm is a per-channel affine transform y = b * x + a,
// followed by relu y = max(y, 0)
{
int size = top_blob.w * top_blob.h;
const float *a_data_ptr = a_data;
const float *b_data_ptr = b_data;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++)
{
float *ptr = top_blob.channel(q);
float a = a_data_ptr[q];
float b = b_data_ptr[q];
#if __ARM_NEON
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
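// vectorized batchnorm pass: broadcast a and b, then compute a + x * b four
// floats at a time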
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"dup v1.4s, %w4 \n"
"dup v2.4s, %w5 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"orr v3.16b, v1.16b, v1.16b \n"
"fmla v3.4s, v0.4s, v2.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v3.4s}, [%1], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"r"(a), // %4
"r"(b) // %5
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
asm volatile(
"vdup.f32 q1, %4 \n"
"vdup.f32 q2, %5 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vorr.32 q3, q1, q1 \n"
"vmla.f32 q3, q0, q2 \n"
"subs %0, #1 \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"r"(a), // %4
"r"(b) // %5
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
ptr = top_blob.channel(q);
#if __ARM_NEON
nn = size >> 2;
remain = size - (nn << 2);
#else
remain = size;
#endif // __ARM_NEON
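// second pass over the same data: clamp negatives to zero (relu)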
#if __ARM_NEON
#if __aarch64__
float32x4_t _zero = vdupq_n_f32(0.f);
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(ptr);
_p = vmaxq_f32(_p, _zero);
vst1q_f32(ptr, _p);
ptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"veor q1, q0, q0 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vmax.f32 q0, q0, q1 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr)
: "cc", "memory", "q0", "q1"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--) {
*ptr = b * *ptr + a;
*ptr = std::max(*ptr, 0.f);
ptr++;
}
}
}
}
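// winograd F(6x6, 3x3) convolution with batchnorm and relu fused in.
// kernel_tm holds the pre-transformed 8x8 kernels; the pipeline is
// input transform -> per-position dot product -> output transform.
// a_data and b_data carry the per-channel batchnorm terms.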
static void convbnrelu3x3s1_winograd64_neon5(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt, const Mat& a_data, const Mat& b_data)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2: each 6x6 output tile consumes an 8x8 input tile (2-pixel
// overlap), so round the output up to a multiple of 6 and border the input
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
bottom_blob_tm.create(1, 64 * tiles, inch, 4u, opt.workspace_allocator);
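// layout per input channel: 64 groups of `tiles` floats, where group r holds
// transform coefficient r of every tile, so the dot stage reads sequentially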
// bottom_blob_tm.create(inch, tiles, 64);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
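// the transform is applied in two passes: first down the columns into
// tmp[8][8], then across the rows of tmp into the 64 output groups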
#if __ARM_NEON
const float coeff[8] = {
0.25f, 0.5f, -1.25f, 2.f,
-2.5f, 4.f, 4.25f, 5.25f
};
float32x4_t _coeff0 = vld1q_f32(coeff);
float32x4_t _coeff1 = vld1q_f32(coeff+4);
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
#if __ARM_NEON
const float* r0 = img0.row(i * 6) + j * 6;
const float* r1 = r0 + w;
const float* r2 = r0 + w*2;
const float* r3 = r0 + w*3;
#if __aarch64__
for (int m=0; m+3<8; m+=4)
{
float32x4_t _r0_0123 = vld1q_f32(r0);
float32x4_t _r0_4567 = vld1q_f32(r0+4);
float32x4_t _r1_0123 = vld1q_f32(r1);
float32x4_t _r1_4567 = vld1q_f32(r1+4);
float32x4_t _r2_0123 = vld1q_f32(r2);
float32x4_t _r2_4567 = vld1q_f32(r2+4);
float32x4_t _r3_0123 = vld1q_f32(r3);
float32x4_t _r3_4567 = vld1q_f32(r3+4);
float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);
// no vswp intrinsic :(
float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));
float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);
float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[7][m], _tmp7);
float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);
float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[2][m], _tmp2);
float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);
float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[4][m], _tmp4);
// reuse r04 * 1.25
// reuse r03 * 2.5
float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);
float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(&tmp[5][m], _tmp5);
vst1q_f32(&tmp[6][m], _tmp6);
r0 += w*4;
r1 += w*4;
r2 += w*4;
r3 += w*4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
const float* t2 = tmp[2];
const float* t3 = tmp[3];
float* r0_tm0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles*8);
float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles*16);
float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles*24);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4_t _t2_0123 = vld1q_f32(t2);
float32x4_t _t2_4567 = vld1q_f32(t2+4);
float32x4_t _t3_0123 = vld1q_f32(t3);
float32x4_t _t3_4567 = vld1q_f32(t3+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);
// no vswp intrinsic :(
float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));
float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);
float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_2_a_6 = vaddq_f32(_t_22, _t_66);
float32x4_t _t_1_a_5 = vaddq_f32(_t_11, _t_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_a_6, _t_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_a_5, _t_33, vget_high_f32(_coeff1), 0);
float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);
float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_0, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);
float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_1, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3);
t0 += 8*4;
t1 += 8*4;
t2 += 8*4;
t3 += 8*4;
r0_tm0 += img0_tm.w*tiles*25;
r0_tm1 += img0_tm.w*tiles*25;
r0_tm2 += img0_tm.w*tiles*25;
r0_tm3 += img0_tm.w*tiles*25;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
float* t2 = tmp[2];
float* t3 = tmp[3];
float* t4 = tmp[4];
float* t5 = tmp[5];
float* t6 = tmp[6];
float* t7 = tmp[7];
int stepw = w*4*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8], %26 \n"
"vld1.f32 {d20-d23}, [%9], %26 \n"
"vld1.f32 {d24-d27}, [%10], %26 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11], %26 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(t2), // %2
"=r"(t3), // %3
"=r"(t4), // %4
"=r"(t5), // %5
"=r"(t6), // %6
"=r"(t7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(r3) // %11
: "0"(t0),
"1"(t1),
"2"(t2),
"3"(t3),
"4"(t4),
"5"(t5),
"6"(t6),
"7"(t7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(r3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(stepw) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
t2 = tmp[2];
t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*8);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*16);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*24);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles*32);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*40);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*48);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*56);
int step = img0_tm.w*tiles*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8] \n"
"add %8, %8, #128 \n"
"vld1.f32 {d20-d23}, [%9] \n"
"add %9, %9, #128 \n"
"vld1.f32 {d24-d27}, [%10] \n"
"add %10, %10, #128 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"add %11, %11, #128 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0], %26 \n"
"vst1.f32 {d4[1]}, [%1], %26 \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%2], %26 \n"
"vst1.f32 {d5[1]}, [%3], %26 \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%1], %26 \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%2], %26 \n"
"vst1.f32 {d17[1]}, [%3], %26 \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0], %26 \n"
"vst1.f32 {d18[1]}, [%1], %26 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%2], %26 \n"
"vst1.f32 {d19[1]}, [%3], %26 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%1], %26 \n"
"vst1.f32 {d17[0]}, [%2], %26 \n"
"vst1.f32 {d17[1]}, [%3], %26 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%0], %26 \n"
"vst1.f32 {d18[1]}, [%1], %26 \n"
"vst1.f32 {d19[0]}, [%2], %26 \n"
"vst1.f32 {d19[1]}, [%3], %26 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%0], %26 \n"
"vst1.f32 {d4[1]}, [%1], %26 \n"
"vst1.f32 {d5[0]}, [%2], %26 \n"
"vst1.f32 {d5[1]}, [%3], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d6[0]}, [%0], %26 \n"
"vst1.f32 {d6[1]}, [%1], %26 \n"
"vst1.f32 {d7[0]}, [%2], %26 \n"
"vst1.f32 {d7[1]}, [%3], %26 \n"
"vst1.f32 {d12[0]}, [%0] \n"
"vst1.f32 {d12[1]}, [%1] \n"
"vst1.f32 {d13[0]}, [%2] \n"
"vst1.f32 {d13[1]}, [%3] \n"
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%4], %26 \n"
"vst1.f32 {d4[1]}, [%5], %26 \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%6], %26 \n"
"vst1.f32 {d5[1]}, [%7], %26 \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%4], %26 \n"
"vst1.f32 {d16[1]}, [%5], %26 \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%6], %26 \n"
"vst1.f32 {d17[1]}, [%7], %26 \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%4], %26 \n"
"vst1.f32 {d18[1]}, [%5], %26 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%6], %26 \n"
"vst1.f32 {d19[1]}, [%7], %26 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%4], %26 \n"
"vst1.f32 {d16[1]}, [%5], %26 \n"
"vst1.f32 {d17[0]}, [%6], %26 \n"
"vst1.f32 {d17[1]}, [%7], %26 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%4], %26 \n"
"vst1.f32 {d18[1]}, [%5], %26 \n"
"vst1.f32 {d19[0]}, [%6], %26 \n"
"vst1.f32 {d19[1]}, [%7], %26 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%4], %26 \n"
"vst1.f32 {d4[1]}, [%5], %26 \n"
"vst1.f32 {d5[0]}, [%6], %26 \n"
"vst1.f32 {d5[1]}, [%7], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d6[0]}, [%4], %26 \n"
"vst1.f32 {d6[1]}, [%5], %26 \n"
"vst1.f32 {d7[0]}, [%6], %26 \n"
"vst1.f32 {d7[1]}, [%7], %26 \n"
"vst1.f32 {d12[0]}, [%4] \n"
"vst1.f32 {d12[1]}, [%5] \n"
"vst1.f32 {d13[0]}, [%6] \n"
"vst1.f32 {d13[1]}, [%7] \n"
: "=r"(r0_tm0_0), // %0
"=r"(r0_tm1_0), // %1
"=r"(r0_tm2_0), // %2
"=r"(r0_tm3_0), // %3
"=r"(r0_tm0_4), // %4
"=r"(r0_tm1_4), // %5
"=r"(r0_tm2_4), // %6
"=r"(r0_tm3_4), // %7
"=r"(t0), // %8
"=r"(t1), // %9
"=r"(t2), // %10
"=r"(t3) // %11
: "0"(r0_tm0_0),
"1"(r0_tm1_0),
"2"(r0_tm2_0),
"3"(r0_tm3_0),
"4"(r0_tm0_4),
"5"(r0_tm1_4),
"6"(r0_tm2_4),
"7"(r0_tm3_4),
"8"(t0),
"9"(t1),
"10"(t2),
"11"(t3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(step) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* r0 = img0.row(i * 6) + j * 6;
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm_1 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm_2 = img0_tm.row(i * w_tm/8 + j + tiles*2);
float* r0_tm_3 = img0_tm.row(i * w_tm/8 + j + tiles*3);
float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles*4);
float* r0_tm_5 = img0_tm.row(i * w_tm/8 + j + tiles*5);
float* r0_tm_6 = img0_tm.row(i * w_tm/8 + j + tiles*6);
float* r0_tm_7 = img0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);
r0_tm_1[0] = tmp12a + tmp12b;
r0_tm_2[0] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);
r0_tm_3[0] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);
r0_tm_5[0] = tmp56a + tmp56b;
r0_tm_6[0] = tmp56a - tmp56b;
r0_tm_0 += img0_tm.w * tiles * 8;
r0_tm_1 += img0_tm.w * tiles * 8;
r0_tm_2 += img0_tm.w * tiles * 8;
r0_tm_3 += img0_tm.w * tiles * 8;
r0_tm_4 += img0_tm.w * tiles * 8;
r0_tm_5 += img0_tm.w * tiles * 8;
r0_tm_6 += img0_tm.w * tiles * 8;
r0_tm_7 += img0_tm.w * tiles * 8;
}
#endif // __ARM_NEON
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
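// for each of the 64 transform positions this is a small matrix multiply:
// top_tm[p][tile] += kernel_tm[p][q] * bottom_tm[q][tile], summed over q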
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
// permute
// bottom_blob_tm.create(1, 64 * tiles, inch);
// Mat bottom_blob_tm2(inch, tiles, 64);
Mat bottom_blob_tm2(8*inch, tiles/8 + (tiles%8)/4 + tiles%4, 64, 4u, opt.workspace_allocator);
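// repack tiles into blocks of 8, then 4, then singletons; within a block each
// input channel in turn stores its 8 (or 4, or 1) tile values contiguously,
// so the kernels below read with unit stride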
#pragma omp parallel for num_threads(opt.num_threads)
for (int r=0; r<64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
float* tm2p = tm2.row(i/8);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
vst1q_f32(tm2p, _r0);
vst1q_f32(tm2p+4, _r0n);
#else
tm2p[0] = r0[0];
tm2p[1] = r0[1];
tm2p[2] = r0[2];
tm2p[3] = r0[3];
tm2p[4] = r0[4];
tm2p[5] = r0[5];
tm2p[6] = r0[6];
tm2p[7] = r0[7];
#endif // __ARM_NEON
r0 += bottom_blob_tm.cstep;
tm2p += 8;
}
}
for (; i+3<tiles; i+=4)
{
float* tm2p = tm2.row(i/8+(i%8)/4);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
vst1q_f32(tm2p, _r0);
#else
tm2p[0] = r0[0];
tm2p[1] = r0[1];
tm2p[2] = r0[2];
tm2p[3] = r0[3];
#endif // __ARM_NEON
r0 += bottom_blob_tm.cstep;
tm2p += 4;
}
}
for (; i<tiles; i++)
{
float* tm2p = tm2.row(i/8+(i%8)/4+i%4);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
for (int q=0; q<inch; q++)
{
tm2p[0] = r0[0];
r0 += bottom_blob_tm.cstep;
tm2p += 1;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(1, 64 * tiles, outch, 4u, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
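// eight output channels per iteration; the wide unroll relies on the larger
// NEON register file of aarch64, hence the guard above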
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
const Mat kernel_tm0 = kernel_tm.channel(p/8);
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
Mat out4_tm = top_blob_tm.channel(p+4);
Mat out5_tm = top_blob_tm.channel(p+5);
Mat out6_tm = top_blob_tm.channel(p+6);
Mat out7_tm = top_blob_tm.channel(p+7);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
float* output4_tm = out4_tm;
float* output5_tm = out5_tm;
float* output6_tm = out6_tm;
float* output7_tm = out7_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
// inch loop
"lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(bb2p0), // %8
"=r"(ktm0) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(bb2p0),
"9"(ktm0),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
// inch loop
"lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(bb2p0), // %8
"=r"(ktm0) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(bb2p0),
"9"(ktm0),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
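// leftover tiles one at a time, with NEON intrinsics instead of inline asm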
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
float32x4_t _sum0123 = vdupq_n_f32(0.f);
float32x4_t _sum4567 = vdupq_n_f32(0.f);
int q=0;
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm0 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm1 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm2 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm3 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm1, _bb2p0, 0);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 1);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm3, _bb2p0, 1);
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm4 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm5 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm6 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm7 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm4, _bb2p0, 2);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm5, _bb2p0, 2);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm6, _bb2p0, 3);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm7, _bb2p0, 3);
}
for (; q<inch; q++)
{
float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0);
float32x4_t _ktm0123 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm4567 = vld1q_f32(ktm0 + 4);
_sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0123);
_sum4567 = vmlaq_f32(_sum4567, _bb2p0, _ktm4567);
bb2p0 += 1;
ktm0 += 8;
}
float sum0 = vgetq_lane_f32(_sum0123, 0);
float sum1 = vgetq_lane_f32(_sum0123, 1);
float sum2 = vgetq_lane_f32(_sum0123, 2);
float sum3 = vgetq_lane_f32(_sum0123, 3);
float sum4 = vgetq_lane_f32(_sum4567, 0);
float sum5 = vgetq_lane_f32(_sum4567, 1);
float sum6 = vgetq_lane_f32(_sum4567, 2);
float sum7 = vgetq_lane_f32(_sum4567, 3);
output0_tm[0] = sum0;
output1_tm[0] = sum1;
output2_tm[0] = sum2;
output3_tm[0] = sum3;
output4_tm[0] = sum4;
output5_tm[0] = sum5;
output6_tm[0] = sum6;
output7_tm[0] = sum7;
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
output4_tm += 1;
output5_tm += 1;
output6_tm += 1;
output7_tm += 1;
}
}
}
#endif // __aarch64__
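// the aarch64-only block above consumed output channels in groups of 8;
// the pass below picks up the rest in groups of 4 from remain_outch_start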
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
#if __ARM_NEON && __aarch64__
const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4);
#else
const Mat kernel_tm0 = kernel_tm.channel(p/4);
#endif
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
// inch loop
"lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"veor q12, q12, q12 \n"
"veor q13, q13, q13 \n"
"veor q14, q14, q14 \n"
"veor q15, q15, q15 \n"
// inch loop
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = tiles & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q11, q5, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0]! \n"
"vst1.f32 {d20-d23}, [%1]! \n"
"vst1.f32 {d24-d27}, [%2]! \n"
"vst1.f32 {d28-d31}, [%3]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum0_0 = 0.f;
float sum0_1 = 0.f;
float sum0_2 = 0.f;
float sum0_3 = 0.f;
float sum0_4 = 0.f;
float sum0_5 = 0.f;
float sum0_6 = 0.f;
float sum0_7 = 0.f;
float sum1_0 = 0.f;
float sum1_1 = 0.f;
float sum1_2 = 0.f;
float sum1_3 = 0.f;
float sum1_4 = 0.f;
float sum1_5 = 0.f;
float sum1_6 = 0.f;
float sum1_7 = 0.f;
float sum2_0 = 0.f;
float sum2_1 = 0.f;
float sum2_2 = 0.f;
float sum2_3 = 0.f;
float sum2_4 = 0.f;
float sum2_5 = 0.f;
float sum2_6 = 0.f;
float sum2_7 = 0.f;
float sum3_0 = 0.f;
float sum3_1 = 0.f;
float sum3_2 = 0.f;
float sum3_3 = 0.f;
float sum3_4 = 0.f;
float sum3_5 = 0.f;
float sum3_6 = 0.f;
float sum3_7 = 0.f;
for (int q=0; q<inch; q++)
{
sum0_0 += bb2p0[0] * ktm0[0];
sum0_1 += bb2p0[1] * ktm0[0];
sum0_2 += bb2p0[2] * ktm0[0];
sum0_3 += bb2p0[3] * ktm0[0];
sum0_4 += bb2p0[4] * ktm0[0];
sum0_5 += bb2p0[5] * ktm0[0];
sum0_6 += bb2p0[6] * ktm0[0];
sum0_7 += bb2p0[7] * ktm0[0];
sum1_0 += bb2p0[0] * ktm0[1];
sum1_1 += bb2p0[1] * ktm0[1];
sum1_2 += bb2p0[2] * ktm0[1];
sum1_3 += bb2p0[3] * ktm0[1];
sum1_4 += bb2p0[4] * ktm0[1];
sum1_5 += bb2p0[5] * ktm0[1];
sum1_6 += bb2p0[6] * ktm0[1];
sum1_7 += bb2p0[7] * ktm0[1];
sum2_0 += bb2p0[0] * ktm0[2];
sum2_1 += bb2p0[1] * ktm0[2];
sum2_2 += bb2p0[2] * ktm0[2];
sum2_3 += bb2p0[3] * ktm0[2];
sum2_4 += bb2p0[4] * ktm0[2];
sum2_5 += bb2p0[5] * ktm0[2];
sum2_6 += bb2p0[6] * ktm0[2];
sum2_7 += bb2p0[7] * ktm0[2];
sum3_0 += bb2p0[0] * ktm0[3];
sum3_1 += bb2p0[1] * ktm0[3];
sum3_2 += bb2p0[2] * ktm0[3];
sum3_3 += bb2p0[3] * ktm0[3];
sum3_4 += bb2p0[4] * ktm0[3];
sum3_5 += bb2p0[5] * ktm0[3];
sum3_6 += bb2p0[6] * ktm0[3];
sum3_7 += bb2p0[7] * ktm0[3];
bb2p0 += 8;
ktm0 += 4;
}
output0_tm[0] = sum0_0;
output0_tm[1] = sum0_1;
output0_tm[2] = sum0_2;
output0_tm[3] = sum0_3;
output0_tm[4] = sum0_4;
output0_tm[5] = sum0_5;
output0_tm[6] = sum0_6;
output0_tm[7] = sum0_7;
output1_tm[0] = sum1_0;
output1_tm[1] = sum1_1;
output1_tm[2] = sum1_2;
output1_tm[3] = sum1_3;
output1_tm[4] = sum1_4;
output1_tm[5] = sum1_5;
output1_tm[6] = sum1_6;
output1_tm[7] = sum1_7;
output2_tm[0] = sum2_0;
output2_tm[1] = sum2_1;
output2_tm[2] = sum2_2;
output2_tm[3] = sum2_3;
output2_tm[4] = sum2_4;
output2_tm[5] = sum2_5;
output2_tm[6] = sum2_6;
output2_tm[7] = sum2_7;
output3_tm[0] = sum3_0;
output3_tm[1] = sum3_1;
output3_tm[2] = sum3_2;
output3_tm[3] = sum3_3;
output3_tm[4] = sum3_4;
output3_tm[5] = sum3_5;
output3_tm[6] = sum3_6;
output3_tm[7] = sum3_7;
output0_tm += 8;
output1_tm += 8;
output2_tm += 8;
output3_tm += 8;
#endif // __ARM_NEON
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
// inch loop
"lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
// inch loop
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = tiles & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0]! \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"vst1.f32 {d20-d21}, [%2]! \n"
"vst1.f32 {d22-d23}, [%3]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
#else
float sum0_0 = 0.f;
float sum0_1 = 0.f;
float sum0_2 = 0.f;
float sum0_3 = 0.f;
float sum1_0 = 0.f;
float sum1_1 = 0.f;
float sum1_2 = 0.f;
float sum1_3 = 0.f;
float sum2_0 = 0.f;
float sum2_1 = 0.f;
float sum2_2 = 0.f;
float sum2_3 = 0.f;
float sum3_0 = 0.f;
float sum3_1 = 0.f;
float sum3_2 = 0.f;
float sum3_3 = 0.f;
for (int q=0; q<inch; q++)
{
sum0_0 += bb2p0[0] * ktm0[0];
sum0_1 += bb2p0[1] * ktm0[0];
sum0_2 += bb2p0[2] * ktm0[0];
sum0_3 += bb2p0[3] * ktm0[0];
sum1_0 += bb2p0[0] * ktm0[1];
sum1_1 += bb2p0[1] * ktm0[1];
sum1_2 += bb2p0[2] * ktm0[1];
sum1_3 += bb2p0[3] * ktm0[1];
sum2_0 += bb2p0[0] * ktm0[2];
sum2_1 += bb2p0[1] * ktm0[2];
sum2_2 += bb2p0[2] * ktm0[2];
sum2_3 += bb2p0[3] * ktm0[2];
sum3_0 += bb2p0[0] * ktm0[3];
sum3_1 += bb2p0[1] * ktm0[3];
sum3_2 += bb2p0[2] * ktm0[3];
sum3_3 += bb2p0[3] * ktm0[3];
bb2p0 += 4;
ktm0 += 4;
}
output0_tm[0] = sum0_0;
output0_tm[1] = sum0_1;
output0_tm[2] = sum0_2;
output0_tm[3] = sum0_3;
output1_tm[0] = sum1_0;
output1_tm[1] = sum1_1;
output1_tm[2] = sum1_2;
output1_tm[3] = sum1_3;
output2_tm[0] = sum2_0;
output2_tm[1] = sum2_1;
output2_tm[2] = sum2_2;
output2_tm[3] = sum2_3;
output3_tm[0] = sum3_0;
output3_tm[1] = sum3_1;
output3_tm[2] = sum3_2;
output3_tm[3] = sum3_3;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
#endif // __ARM_NEON
}
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
float32x4_t _sum0123 = vdupq_n_f32(0.f);
int q=0;
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm0 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm1 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm2 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm3 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
#if __aarch64__
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm1, _bb2p0, 1);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 2);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm3, _bb2p0, 3);
#else
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm0, vget_low_f32(_bb2p0), 0);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm1, vget_low_f32(_bb2p0), 1);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm2, vget_high_f32(_bb2p0), 0);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm3, vget_high_f32(_bb2p0), 1);
#endif // __aarch64__
}
for (; q<inch; q++)
{
float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0);
float32x4_t _ktm0 = vld1q_f32(ktm0);
_sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0);
bb2p0 += 1;
ktm0 += 4;
}
float sum0 = vgetq_lane_f32(_sum0123, 0);
float sum1 = vgetq_lane_f32(_sum0123, 1);
float sum2 = vgetq_lane_f32(_sum0123, 2);
float sum3 = vgetq_lane_f32(_sum0123, 3);
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
for (int q=0; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[0] * ktm0[1];
sum2 += bb2p0[0] * ktm0[2];
sum3 += bb2p0[0] * ktm0[3];
bb2p0 += 1;
ktm0 += 4;
}
#endif // __ARM_NEON
output0_tm[0] = sum0;
output1_tm[0] = sum1;
output2_tm[0] = sum2;
output3_tm[0] = sum3;
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
}
}
}
remain_outch_start += nn_outch << 2;
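// whatever output channels remain are handled one at a time below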
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
#if __ARM_NEON && __aarch64__
const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4+p%4);
#else
const Mat kernel_tm0 = kernel_tm.channel(p/4+p%4);
#endif
Mat out0_tm = top_blob_tm.channel(p);
float* output0_tm = out0_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
// inch loop
"lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
// inch loop
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = tiles & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0]! \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
float sum4 = 0.f;
float sum5 = 0.f;
float sum6 = 0.f;
float sum7 = 0.f;
for (int q=0; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[1] * ktm0[0];
sum2 += bb2p0[2] * ktm0[0];
sum3 += bb2p0[3] * ktm0[0];
sum4 += bb2p0[4] * ktm0[0];
sum5 += bb2p0[5] * ktm0[0];
sum6 += bb2p0[6] * ktm0[0];
sum7 += bb2p0[7] * ktm0[0];
bb2p0 += 8;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm[1] = sum1;
output0_tm[2] = sum2;
output0_tm[3] = sum3;
output0_tm[4] = sum4;
output0_tm[5] = sum5;
output0_tm[6] = sum6;
output0_tm[7] = sum7;
output0_tm += 8;
#endif // __ARM_NEON
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
// inch loop
"lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #32] \n"
"ld1r {v0.4s}, [%5], #4 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
// inch loop
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = tiles & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4]! \n"
"pld [%5, #32] \n"
"vld1.f32 {d0[],d1[]}, [%5]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0]! \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
for (int q=0; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[1] * ktm0[0];
sum2 += bb2p0[2] * ktm0[0];
sum3 += bb2p0[3] * ktm0[0];
bb2p0 += 4;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm[1] = sum1;
output0_tm[2] = sum2;
output0_tm[3] = sum3;
output0_tm += 4;
#endif // __ARM_NEON
}
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
int q=0;
#if __ARM_NEON
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
float32x4_t _ktm0 = vld1q_f32(ktm0);
ktm0 += 4;
_sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0);
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = vget_lane_f32(vpadd_f32(_ss0, _ss0), 0);
#endif // __aarch64__
#else
float sum0 = 0.f;
#endif
for (; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
bb2p0 += 1;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm += 1;
}
}
}
}
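// the transformed input is no longer needed; release it before the output transform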
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
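// i.e. the inverse Winograd F(6x6, 3x3) output transform: each 8x8
// transform-domain tile collapses to a 6x6 output tile, reduced first
// along columns into tmp[6][8] and then along rows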
#if __ARM_NEON
const float coeff[4] = { 4.f, 8.f, 16.f, 32.f };
float32x4_t _coeff = vld1q_f32(coeff);
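// _coeff lanes are {4, 8, 16, 32}: vget_low_f32 -> {4, 8},
// vget_high_f32 -> {16, 32} for the lane-indexed multiply-adds below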
#endif // __ARM_NEON
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
#if __ARM_NEON
#if __aarch64__
const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles*8);
const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles*16);
const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles*24);
for (int m=0; m+3<8; m+=4)
{
// all four lanes of each vector are filled by vsetq_lane_f32 below;
// zero-initialize so no uninitialized value is ever read
float32x4_t _output0_tm_00 = vdupq_n_f32(0.f);
float32x4_t _output0_tm_11 = vdupq_n_f32(0.f);
float32x4_t _output0_tm_22 = vdupq_n_f32(0.f);
float32x4_t _output0_tm_33 = vdupq_n_f32(0.f);
float32x4_t _output0_tm_44 = vdupq_n_f32(0.f);
float32x4_t _output0_tm_55 = vdupq_n_f32(0.f);
float32x4_t _output0_tm_66 = vdupq_n_f32(0.f);
float32x4_t _output0_tm_77 = vdupq_n_f32(0.f);
_output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm0[0], _output0_tm_66, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm1[0], _output0_tm_66, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0);
_output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1);
_output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2);
_output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3);
float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
_tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
_tmp0 = vaddq_f32(_tmp0, _tmp024b);
float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[2][m], _tmp2);
vst1q_f32(&tmp[4][m], _tmp4);
float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
_tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
_tmp5 = vaddq_f32(_tmp5, _tmp135c);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[5][m], _tmp5);
output0_tm0 += out0_tm.w*tiles*25;
output0_tm1 += out0_tm.w*tiles*25;
output0_tm2 += out0_tm.w*tiles*25;
output0_tm3 += out0_tm.w*tiles*25;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
for (int m=0; m+1<6; m+=2)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
_output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
_output_0 = vadd_f32(_output_0, _tmp024b);
_output_0 = vadd_f32(_output_0, _bias0);
float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
_output_2 = vadd_f32(_output_2, _bias0);
float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _bias0);
output0[0] = vget_lane_f32(_output_0, 0);
output1[0] = vget_lane_f32(_output_0, 1);
output0[2] = vget_lane_f32(_output_2, 0);
output1[2] = vget_lane_f32(_output_2, 1);
output0[4] = vget_lane_f32(_output_4, 0);
output1[4] = vget_lane_f32(_output_4, 1);
float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _bias0);
float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
_output_3 = vadd_f32(_output_3, _bias0);
float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
_output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
_output_5 = vadd_f32(_output_5, _tmp135c);
_output_5 = vadd_f32(_output_5, _bias0);
output0[1] = vget_lane_f32(_output_1, 0);
output1[1] = vget_lane_f32(_output_1, 1);
output0[3] = vget_lane_f32(_output_3, 0);
output1[3] = vget_lane_f32(_output_3, 1);
output0[5] = vget_lane_f32(_output_5, 0);
output1[5] = vget_lane_f32(_output_5, 1);
t0 += 8*2;
t1 += 8*2;
output0 += outw*2;
output1 += outw*2;
}
#else // __aarch64__
const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*8);
const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*16);
const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*24);
const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles*32);
const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*40);
const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*48);
const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*56);
float* t0 = tmp[0];
float* t1 = tmp[1];
// int step = out0_tm.w * tiles * 2*4 *4;
int step = out0_tm.w * tiles *4;
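// byte stride between consecutive transform positions: out0_tm.w * tiles floats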
asm volatile(
// loop0
// "vld1.f32 {d16-d17}, [%2], %21 \n"
// "vld1.f32 {d18-d19}, [%3], %21 \n"
// "vld1.f32 {d20-d21}, [%4], %21 \n"
// "vld1.f32 {d22-d23}, [%5], %21 \n"
// "vld1.f32 {d24-d25}, [%6], %21 \n"
// "vld1.f32 {d26-d27}, [%7], %21 \n"
// "vld1.f32 {d28-d29}, [%8], %21 \n"
// "vld1.f32 {d30-d31}, [%9], %21 \n"
// "vtrn.32 q8, q10 \n"
// "vtrn.32 q9, q11 \n"
// "vtrn.32 q12, q14 \n"
// "vtrn.32 q13, q15 \n"
// "vswp d17, d24 \n"
// "vswp d19, d26 \n"
// "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
// "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vld1.f32 {d16[0]}, [%2], %21 \n"
"vld1.f32 {d16[1]}, [%3], %21 \n"
"vld1.f32 {d17[0]}, [%4], %21 \n"
"vld1.f32 {d17[1]}, [%5], %21 \n"
"vld1.f32 {d20[0]}, [%2], %21 \n"
"vld1.f32 {d20[1]}, [%3], %21 \n"
"vld1.f32 {d21[0]}, [%4], %21 \n"
"vld1.f32 {d21[1]}, [%5], %21 \n"
"vld1.f32 {d24[0]}, [%2], %21 \n"
"vld1.f32 {d24[1]}, [%3], %21 \n"
"vld1.f32 {d25[0]}, [%4], %21 \n"
"vld1.f32 {d25[1]}, [%5], %21 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%2], %21 \n"
"vld1.f32 {d28[1]}, [%3], %21 \n"
"vld1.f32 {d29[0]}, [%4], %21 \n"
"vld1.f32 {d29[1]}, [%5], %21 \n"
"vld1.f32 {d18[0]}, [%2], %21 \n"
"vld1.f32 {d18[1]}, [%3], %21 \n"
"vld1.f32 {d19[0]}, [%4], %21 \n"
"vld1.f32 {d19[1]}, [%5], %21 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%2], %21 \n"
"vld1.f32 {d22[1]}, [%3], %21 \n"
"vld1.f32 {d23[0]}, [%4], %21 \n"
"vld1.f32 {d23[1]}, [%5], %21 \n"
"vld1.f32 {d26[0]}, [%2], %21 \n"
"vld1.f32 {d26[1]}, [%3], %21 \n"
"vld1.f32 {d27[0]}, [%4], %21 \n"
"vld1.f32 {d27[1]}, [%5], %21 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%2] \n"
"vld1.f32 {d30[1]}, [%3] \n"
"vld1.f32 {d31[0]}, [%4] \n"
"vld1.f32 {d31[1]}, [%5] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"sub %0, %0, #112 \n"
"vst1.f32 {d30-d31}, [%1] \n"
"sub %1, %1, #112 \n"
// loop1
// "vld1.f32 {d16-d17}, [%2] \n"
// "vld1.f32 {d18-d19}, [%3] \n"
// "vld1.f32 {d20-d21}, [%4] \n"
// "vld1.f32 {d22-d23}, [%5] \n"
// "vld1.f32 {d24-d25}, [%6] \n"
// "vld1.f32 {d26-d27}, [%7] \n"
// "vld1.f32 {d28-d29}, [%8] \n"
// "vld1.f32 {d30-d31}, [%9] \n"
// "vtrn.32 q8, q10 \n"
// "vtrn.32 q9, q11 \n"
// "vtrn.32 q12, q14 \n"
// "vtrn.32 q13, q15 \n"
// "vswp d17, d24 \n"
// "vswp d19, d26 \n"
// "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
// "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vld1.f32 {d16[0]}, [%6], %21 \n"
"vld1.f32 {d16[1]}, [%7], %21 \n"
"vld1.f32 {d17[0]}, [%8], %21 \n"
"vld1.f32 {d17[1]}, [%9], %21 \n"
"vld1.f32 {d20[0]}, [%6], %21 \n"
"vld1.f32 {d20[1]}, [%7], %21 \n"
"vld1.f32 {d21[0]}, [%8], %21 \n"
"vld1.f32 {d21[1]}, [%9], %21 \n"
"vld1.f32 {d24[0]}, [%6], %21 \n"
"vld1.f32 {d24[1]}, [%7], %21 \n"
"vld1.f32 {d25[0]}, [%8], %21 \n"
"vld1.f32 {d25[1]}, [%9], %21 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%6], %21 \n"
"vld1.f32 {d28[1]}, [%7], %21 \n"
"vld1.f32 {d29[0]}, [%8], %21 \n"
"vld1.f32 {d29[1]}, [%9], %21 \n"
"vld1.f32 {d18[0]}, [%6], %21 \n"
"vld1.f32 {d18[1]}, [%7], %21 \n"
"vld1.f32 {d19[0]}, [%8], %21 \n"
"vld1.f32 {d19[1]}, [%9], %21 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%6], %21 \n"
"vld1.f32 {d22[1]}, [%7], %21 \n"
"vld1.f32 {d23[0]}, [%8], %21 \n"
"vld1.f32 {d23[1]}, [%9], %21 \n"
"vld1.f32 {d26[0]}, [%6], %21 \n"
"vld1.f32 {d26[1]}, [%7], %21 \n"
"vld1.f32 {d27[0]}, [%8], %21 \n"
"vld1.f32 {d27[1]}, [%9], %21 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%6] \n"
"vld1.f32 {d30[1]}, [%7] \n"
"vld1.f32 {d31[0]}, [%8] \n"
"vld1.f32 {d31[1]}, [%9] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"vst1.f32 {d30-d31}, [%1] \n"
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(output0_tm0_0), // %2
"=r"(output0_tm1_0), // %3
"=r"(output0_tm2_0), // %4
"=r"(output0_tm3_0), // %5
"=r"(output0_tm0_4), // %6
"=r"(output0_tm1_4), // %7
"=r"(output0_tm2_4), // %8
"=r"(output0_tm3_4) // %9
: "0"(t0),
"1"(t1),
"2"(output0_tm0_0),
"3"(output0_tm1_0),
"4"(output0_tm2_0),
"5"(output0_tm3_0),
"6"(output0_tm0_4),
"7"(output0_tm1_4),
"8"(output0_tm2_4),
"9"(output0_tm3_4),
"w"(_coeff), // %20
"r"(step) // %21
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
int stepw = outw*2 * 4;
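// byte stride advancing output0/output1 by two output rows at a time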
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop1
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop2
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(t0), // %2
"=r"(t1) // %3
: "0"(output0),
"1"(output1),
"2"(t0),
"3"(t1),
"w"(_coeff), // %8
"w"(_bias0), // %9
"r"(stepw) // %10
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm_1 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm_2 = out0_tm.row(i * w_tm/8 + j + tiles*2);
const float* output0_tm_3 = out0_tm.row(i * w_tm/8 + j + tiles*3);
const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles*4);
const float* output0_tm_5 = out0_tm.row(i * w_tm/8 + j + tiles*5);
const float* output0_tm_6 = out0_tm.row(i * w_tm/8 + j + tiles*6);
const float* output0_tm_7 = out0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m<8; m++)
{
float tmp024a = output0_tm_1[0] + output0_tm_2[0];
float tmp135a = output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += out0_tm.w * tiles * 8;
output0_tm_1 += out0_tm.w * tiles * 8;
output0_tm_2 += out0_tm.w * tiles * 8;
output0_tm_3 += out0_tm.w * tiles * 8;
output0_tm_4 += out0_tm.w * tiles * 8;
output0_tm_5 += out0_tm.w * tiles * 8;
output0_tm_6 += out0_tm.w * tiles * 8;
output0_tm_7 += out0_tm.w * tiles * 8;
}
float* output0 = out0.row(i * 6) + j * 6;
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
// fused batchnorm + relu
{
int size = top_blob.w * top_blob.h;
const float *a_data_ptr = a_data;
const float *b_data_ptr = b_data;
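// applied in place: y = max(a + b * x, 0), per-channel a = a_data[q], b = b_data[q]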
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++) {
{
float *ptr = top_blob.channel(q);
float a = a_data_ptr[q];
float b = b_data_ptr[q];
#if __ARM_NEON
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"dup v1.4s, %w4 \n"
"dup v2.4s, %w5 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"orr v3.16b, v1.16b, v1.16b \n"
"fmla v3.4s, v0.4s, v2.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v3.4s}, [%1], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"r"(a), // %4
"r"(b) // %5
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
asm volatile(
"vdup.f32 q1, %4 \n"
"vdup.f32 q2, %5 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vorr.32 q3, q1, q1 \n"
"vmla.f32 q3, q0, q2 \n"
"subs %0, #1 \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"r"(a), // %4
"r"(b) // %5
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
ptr = top_blob.channel(q);
#if __ARM_NEON
nn = size >> 2;
remain = size - (nn << 2);
#else
remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
float32x4_t _zero = vdupq_n_f32(0.f);
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(ptr);
_p = vmaxq_f32(_p, _zero);
vst1q_f32(ptr, _p);
ptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"veor q1, q0, q0 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vmax.f32 q0, q0, q1 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr)
: "cc", "memory", "q0", "q1"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--) {
*ptr = b * *ptr + a;
*ptr = std::max(*ptr, 0.f);
ptr++;
}
}
}
}
}
static void convbnrelu3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& a_data, const Mat& b_data)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
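// stride-2: each output row consumes 2*outw input pixels, then jumps to
// the start of the row after next, hence tailstep = (w - 2*outw) + w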
const float* kernel = _kernel;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
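// two output channels per iteration so each input row is loaded once for both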
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
const float* k0 = kernel + p*inch*9;
const float* k1 = kernel + (p+1)*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k03 = vld1q_f32(k0+3);
float32x4_t _k06 = vld1q_f32(k0+6);
float32x4_t _k10 = vld1q_f32(k1);
float32x4_t _k13 = vld1q_f32(k1+3);
float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n"// v6 = _sum0
"fmul v12.4s, v8.4s, %12.s[0] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n"// v7 = _sum1
"fmul v13.4s, v8.4s, %15.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld2 {v10.4s, v11.4s}, [%3] \n"// v10
"fmla v6.4s, v9.4s, %12.s[1] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v7.4s, v9.4s, %15.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4], #32 \n"// r1
"fmla v12.4s, v14.4s, %12.s[2] \n"
"fmla v13.4s, v14.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld2 {v10.4s, v11.4s}, [%4] \n"
"fmla v6.4s, v8.4s, %13.s[0] \n"
"fmla v7.4s, v8.4s, %16.s[0] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v12.4s, v9.4s, %13.s[1] \n"
"fmla v13.4s, v9.4s, %16.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v8.4s, v9.4s}, [%5], #32 \n"// r2
"fmla v6.4s, v14.4s, %13.s[2] \n"
"fmla v7.4s, v14.4s, %16.s[2] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld2 {v10.4s, v11.4s}, [%5] \n"
"fmla v12.4s, v8.4s, %14.s[0] \n"
"fmla v13.4s, v8.4s, %17.s[0] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v6.4s, v9.4s, %14.s[1] \n"
"fmla v7.4s, v9.4s, %17.s[1] \n"
"fmla v12.4s, v14.4s, %14.s[2] \n"
"fmla v13.4s, v14.4s, %17.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
"fadd v6.4s, v6.4s, v12.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"bne 0b \n"
"sub %3, %3, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%3, #256] \n"
"vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1] \n"// q6 = _sum0
"vmul.f32 q12, q8, %e12[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2] \n"// q7 = _sum1
"vmul.f32 q13, q8, %e15[0] \n"
"pld [%3, #128] \n"
"vld2.f32 {d20-d21}, [%3] \n"// q10
"vmla.f32 q6, q9, %e12[1] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q7, q9, %e15[1] \n"
"pld [%4, #256] \n"
"vld2.f32 {d16-d19}, [%4]! \n"// r1
"vmla.f32 q12, q11, %f12[0] \n"
"vmla.f32 q13, q11, %f15[0] \n"
"pld [%4, #128] \n"
"vld2.f32 {d20-d21}, [%4] \n"
"vmla.f32 q6, q8, %e13[0] \n"
"vmla.f32 q7, q8, %e16[0] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q12, q9, %e13[1] \n"
"vmla.f32 q13, q9, %e16[1] \n"
"pld [%5, #256] \n"
"vld2.f32 {d16-d19}, [%5]! \n"// r2
"vmla.f32 q6, q11, %f13[0] \n"
"vmla.f32 q7, q11, %f16[0] \n"
"pld [%5, #128] \n"
"vld2.f32 {d20-d21}, [%5] \n"
"vmla.f32 q12, q8, %e14[0] \n"
"vmla.f32 q13, q8, %e17[0] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q6, q9, %e14[1] \n"
"vmla.f32 q7, q9, %e17[1] \n"
"vmla.f32 q12, q11, %f14[0] \n"
"vmla.f32 q13, q11, %f17[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
"vadd.f32 q6, q6, q12 \n"
"vadd.f32 q7, q7, q13 \n"
"subs %0, #1 \n"
"vst1.f32 {d12-d13}, [%1]! \n"
"vst1.f32 {d14-d15}, [%2]! \n"
"bne 0b \n"
"sub %3, #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
outptr1++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
k0 += 9;
k1 += 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k3456 = vld1q_f32(k1);
float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
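// The ld2 loads below deinterleave even/odd input columns into two
// vectors, which yields the stride-2 taps x+0 and x+1 directly; the
// following ext instruction splices in the start of the next block to
// form the x+2 tap.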
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmla v0.4s, v2.4s, %10.s[0] \n"
"fmul v10.4s, v3.4s, %10.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmul v11.4s, v1.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v2.4s, v3.4s}, [%3], #32 \n"
"fmla v0.4s, v2.4s, %11.s[0] \n"
"fmla v10.4s, v3.4s, %11.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v2.4s, v3.4s}, [%4], #32 \n"
"fmla v0.4s, v2.4s, %12.s[0] \n"
"fmla v10.4s, v3.4s, %12.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"fadd v0.4s, v0.4s, v10.4s \n"
"fadd v0.4s, v0.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v0.4s}, [%1], #16 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmla.f32 q0, q2, %e10[0] \n"
"vmul.f32 q10, q3, %e10[1] \n"
"pld [%2, #128] \n"
"vld2.f32 {d16-d17}, [%2] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmul.f32 q11, q1, %f10[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vmla.f32 q0, q2, %e11[0] \n"
"vmla.f32 q10, q3, %e11[1] \n"
"pld [%3, #128] \n"
"vld2.f32 {d16-d17}, [%3] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f11[0] \n"
"pld [%4, #256] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vmla.f32 q0, q2, %e12[0] \n"
"vmla.f32 q10, q3, %e12[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d16-d17}, [%4] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f12[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vadd.f32 q0, q0, q10 \n"
"vadd.f32 q0, q0, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
//////////////////BN RELU///////////////////////////
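// batchnorm folded to a per-channel affine transform: y = b*x + a, with a
// and b typically precomputed from the BN mean/var/gamma/beta; ReLU then
// clamps negatives to zero. The NEON paths below process 4 floats per step.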
{
int size = top_blob.w * top_blob.h;
const float *a_data_ptr = a_data;
const float *b_data_ptr = b_data;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++) {
{
float *ptr = top_blob.channel(q);
float a = a_data_ptr[q];
float b = b_data_ptr[q];
#if __ARM_NEON
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"dup v1.4s, %w4 \n"
"dup v2.4s, %w5 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"orr v3.16b, v1.16b, v1.16b \n"
"fmla v3.4s, v0.4s, v2.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v3.4s}, [%1], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"r"(a), // %4
"r"(b) // %5
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
asm volatile(
"vdup.f32 q1, %4 \n"
"vdup.f32 q2, %5 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vorr.32 q3, q1, q1 \n"
"vmla.f32 q3, q0, q2 \n"
"subs %0, #1 \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"r"(a), // %4
"r"(b) // %5
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
ptr = top_blob.channel(q);
#if __ARM_NEON
nn = size >> 2;
remain = size - (nn << 2);
#else
remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
float32x4_t _zero = vdupq_n_f32(0.f);
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(ptr);
_p = vmaxq_f32(_p, _zero);
vst1q_f32(ptr, _p);
ptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"veor q1, q0, q0 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vmax.f32 q0, q0, q1 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr)
: "cc", "memory", "q0", "q1"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--) {
*ptr = b * *ptr + a;
*ptr = std::max(*ptr, 0.f);
ptr++;
}
}
}
}
}
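// ---------------------------------------------------------------------------
// Reference sketch (not part of ncnn): plain scalar code for what the NEON
// kernels in this file compute for a single output channel. The convolution
// is 3x3 with stride 2; batchnorm is folded into a per-channel scale/shift
// (y = b*y + a) and followed by ReLU, matching the epilogue loops above.
// Parameter names mirror the surrounding code; cstep stands in for the
// per-channel element stride of the input blob, and the caller is assumed to
// have sized the input so that every 3x3 window is in bounds.
static void convbnrelu3x3s2_reference(const float* bottom, int w, int cstep,
                                      int inch, float* out, int outw, int outh,
                                      const float* kernel, float bias,
                                      float a, float b)
{
    for (int p = 0; p < outw * outh; p++)
        out[p] = bias;

    for (int q = 0; q < inch; q++)
    {
        const float* img = bottom + q * cstep; // input channel q
        const float* k = kernel + q * 9;       // 3x3 kernel for this channel

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                const float* r0 = img + (i * 2) * w + j * 2;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;

                out[i * outw + j] += r0[0] * k[0] + r0[1] * k[1] + r0[2] * k[2]
                                   + r1[0] * k[3] + r1[1] * k[4] + r1[2] * k[5]
                                   + r2[0] * k[6] + r2[1] * k[7] + r2[2] * k[8];
            }
        }
    }

    for (int p = 0; p < outw * outh; p++)
    {
        float y = b * out[p] + a;       // folded batchnorm: scale then shift
        out[p] = y > 0.f ? y : 0.f;     // ReLU
    }
}
// ---------------------------------------------------------------------------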
static void convbnrelu3x3s2_packed_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& a_data, const Mat& b_data)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
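// stride 2 consumes 2*outw pixels per output row, so tailstep skips the
// leftover of the current input row plus one full row to reach the next
// input row pair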
// const float* kernel = _kernel;
const float* bias = _bias;
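// output channels are processed in blocks of 8; channels from
// remain_outch_start onward fall through to the per-channel loop below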
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
Mat out0 = top_blob.channel(p+0);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
Mat out4 = top_blob.channel(p+4);
Mat out5 = top_blob.channel(p+5);
Mat out6 = top_blob.channel(p+6);
Mat out7 = top_blob.channel(p+7);
const float bias0 = bias ? bias[p+0] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
const float bias2 = bias ? bias[p+2] : 0.f;
const float bias3 = bias ? bias[p+3] : 0.f;
const float bias4 = bias ? bias[p+4] : 0.f;
const float bias5 = bias ? bias[p+5] : 0.f;
const float bias6 = bias ? bias[p+6] : 0.f;
const float bias7 = bias ? bias[p+7] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
out2.fill(bias2);
out3.fill(bias3);
out4.fill(bias4);
out5.fill(bias5);
out6.fill(bias6);
out7.fill(bias7);
const float* ktmp = _kernel.channel(p/8);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
float* outptr4 = out4;
float* outptr5 = out5;
float* outptr6 = out6;
float* outptr7 = out7;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v8.4s}, [%1] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v9.4s}, [%2] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v10.4s}, [%3] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v11.4s}, [%4] \n"
///
"prfm pldl1keep, [%9, #256] \n"
"ld2 {v4.4s, v5.4s}, [%9], #32 \n"// v4=00 v5=01
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v12.4s}, [%5] \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v13.4s}, [%6] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v14.4s}, [%7] \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v15.4s}, [%8] \n"
"ld1 {v2.4s, v3.4s}, [%12], #32 \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld2 {v6.4s, v7.4s}, [%9] \n"// v6
"fmla v8.4s, v5.4s, v2.s[0] \n"
"fmla v9.4s, v5.4s, v2.s[1] \n"
"fmla v10.4s, v5.4s, v2.s[2] \n"
"fmla v11.4s, v5.4s, v2.s[3] \n"
"ext v6.16b, v4.16b, v6.16b, #4 \n"// v6=02
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v12.4s, v5.4s, v3.s[0] \n"
"fmla v13.4s, v5.4s, v3.s[1] \n"
"fmla v14.4s, v5.4s, v3.s[2] \n"
"fmla v15.4s, v5.4s, v3.s[3] \n"
///
"prfm pldl1keep, [%10, #256] \n"
"ld2 {v4.4s, v5.4s}, [%10], #32 \n"// v4=10 v5=11
"fmla v8.4s, v6.4s, v0.s[0] \n"
"fmla v9.4s, v6.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v6.4s, v0.s[3] \n"
"ld1 {v2.4s, v3.4s}, [%12], #32 \n"
"fmla v12.4s, v6.4s, v1.s[0] \n"
"fmla v13.4s, v6.4s, v1.s[1] \n"
"fmla v14.4s, v6.4s, v1.s[2] \n"
"fmla v15.4s, v6.4s, v1.s[3] \n"
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld2 {v6.4s, v7.4s}, [%10] \n"// v6
"fmla v8.4s, v5.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"fmla v10.4s, v5.4s, v0.s[2] \n"
"fmla v11.4s, v5.4s, v0.s[3] \n"
"ld1 {v2.4s, v3.4s}, [%12], #32 \n"
"ext v6.16b, v4.16b, v6.16b, #4 \n"// v6=12
"fmla v12.4s, v5.4s, v1.s[0] \n"
"fmla v13.4s, v5.4s, v1.s[1] \n"
"fmla v14.4s, v5.4s, v1.s[2] \n"
"fmla v15.4s, v5.4s, v1.s[3] \n"
///
"prfm pldl1keep, [%11, #256] \n"
"ld2 {v4.4s, v5.4s}, [%11], #32 \n"// v4=20 v5=21
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v12.4s, v6.4s, v3.s[0] \n"
"fmla v13.4s, v6.4s, v3.s[1] \n"
"fmla v14.4s, v6.4s, v3.s[2] \n"
"fmla v15.4s, v6.4s, v3.s[3] \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"ld1 {v2.4s, v3.4s}, [%12], #32 \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"prfm pldl1keep, [%11, #256] \n"
"ld2 {v6.4s, v7.4s}, [%11] \n"// v6
"fmla v8.4s, v5.4s, v2.s[0] \n"
"fmla v9.4s, v5.4s, v2.s[1] \n"
"fmla v10.4s, v5.4s, v2.s[2] \n"
"fmla v11.4s, v5.4s, v2.s[3] \n"
"ext v6.16b, v4.16b, v6.16b, #4 \n"// v6=22
"ld1 {v0.4s, v1.4s}, [%12], #32 \n"
"fmla v12.4s, v5.4s, v3.s[0] \n"
"fmla v13.4s, v5.4s, v3.s[1] \n"
"fmla v14.4s, v5.4s, v3.s[2] \n"
"fmla v15.4s, v5.4s, v3.s[3] \n"
"fmla v8.4s, v6.4s, v0.s[0] \n"
"fmla v9.4s, v6.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v6.4s, v0.s[3] \n"
"fmla v12.4s, v6.4s, v1.s[0] \n"
"fmla v13.4s, v6.4s, v1.s[1] \n"
"st1 {v8.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%2], #16 \n"
"fmla v14.4s, v6.4s, v1.s[2] \n"
"fmla v15.4s, v6.4s, v1.s[3] \n"
"st1 {v10.4s}, [%3], #16 \n"
"st1 {v11.4s}, [%4], #16 \n"
"sub %12, %12, #288 \n"
"st1 {v12.4s}, [%5], #16 \n"
"st1 {v13.4s}, [%6], #16 \n"
"subs %w0, %w0, #1 \n"
"st1 {v14.4s}, [%7], #16 \n"
"st1 {v15.4s}, [%8], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else // __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d16-d17}, [%1] \n"
"pld [%2, #128] \n"
"vld1.f32 {d18-d19}, [%2] \n"
"pld [%3, #128] \n"
"vld1.f32 {d20-d21}, [%3] \n"
"pld [%4, #128] \n"
"vld1.f32 {d22-d23}, [%4] \n"
///
"pld [%9, #256] \n"
"vld2.f32 {d8-d11}, [%9]! \n"// q4=00 q5=01
"vld1.f32 {d0-d3}, [%12 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"pld [%5, #128] \n"
"vld1.f32 {d24-d25}, [%5] \n"
"pld [%6, #128] \n"
"vld1.f32 {d26-d27}, [%6] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"pld [%7, #128] \n"
"vld1.f32 {d28-d29}, [%7] \n"
"pld [%8, #128] \n"
"vld1.f32 {d30-d31}, [%8] \n"
"vld1.f32 {d4-d7}, [%12 :128]! \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"pld [%9, #128] \n"
"vld2.f32 {d12-d13}, [%9] \n"// q6
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vext.f32 q6, q4, q6, #1 \n"// q6=02
"vld1.f32 {d0-d3}, [%12 :128]! \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
///
"pld [%10, #256] \n"
"vld2.f32 {d8-d11}, [%10]! \n"// q4=10 q5=11
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vld1.f32 {d4-d7}, [%12 :128]! \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q9, q4, d4[1] \n"
"vmla.f32 q10, q4, d5[0] \n"
"vmla.f32 q11, q4, d5[1] \n"
"vld1.f32 {d0-d3}, [%12 :128]! \n"
"vmla.f32 q12, q4, d6[0] \n"
"vmla.f32 q13, q4, d6[1] \n"
"vmla.f32 q14, q4, d7[0] \n"
"vmla.f32 q15, q4, d7[1] \n"
"pld [%10, #128] \n"
"vld2.f32 {d12-d13}, [%10] \n"// q6
"vmla.f32 q8, q5, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"vmla.f32 q10, q5, d1[0] \n"
"vmla.f32 q11, q5, d1[1] \n"
"vld1.f32 {d4-d7}, [%12 :128]! \n"
"vext.f32 q6, q4, q6, #1 \n"// q6=12
"vmla.f32 q12, q5, d2[0] \n"
"vmla.f32 q13, q5, d2[1] \n"
"vmla.f32 q14, q5, d3[0] \n"
"vmla.f32 q15, q5, d3[1] \n"
///
"pld [%11, #256] \n"
"vld2.f32 {d8-d11}, [%11]! \n"// q4=20 q5=21
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vld1.f32 {d0-d3}, [%12 :128]! \n"
"vmla.f32 q12, q6, d6[0] \n"
"vmla.f32 q13, q6, d6[1] \n"
"vmla.f32 q14, q6, d7[0] \n"
"vmla.f32 q15, q6, d7[1] \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vld1.f32 {d4-d7}, [%12 :128]! \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"pld [%11, #128] \n"
"vld2.f32 {d12-d13}, [%11] \n"// q6
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vext.f32 q6, q4, q6, #1 \n"// q6=22
"vld1.f32 {d0-d3}, [%12 :128]! \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"
"vst1.f32 {d18-d19}, [%2]! \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"vst1.f32 {d20-d21}, [%3]! \n"
"vst1.f32 {d22-d23}, [%4]! \n"
"sub %12, %12, #288 \n"
"vst1.f32 {d24-d25}, [%5]! \n"
"vst1.f32 {d26-d27}, [%6]! \n"
"subs %0, #1 \n"
"vst1.f32 {d28-d29}, [%7]! \n"
"vst1.f32 {d30-d31}, [%8]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v0.4s}, [%8] \n"
"ld1 {v12.4s, v13.4s}, [%11], #32 \n"
"ld1 {v8.s}[0], [%0] \n"
"ld1 {v8.s}[1], [%1] \n"
"ld1 {v8.s}[2], [%2] \n"
"ld1 {v8.s}[3], [%3] \n"
"fmul v14.4s, v10.4s, v0.s[0] \n"
"fmul v15.4s, v11.4s, v0.s[0] \n"
"ld1 {v9.s}[0], [%4] \n"
"ld1 {v9.s}[1], [%5] \n"
"ld1 {v9.s}[2], [%6] \n"
"ld1 {v9.s}[3], [%7] \n"
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"fmla v8.4s, v12.4s, v0.s[1] \n"
"fmla v9.4s, v13.4s, v0.s[1] \n"
"ld1 {v12.4s, v13.4s}, [%11], #32 \n"
"fmla v14.4s, v10.4s, v0.s[2] \n"
"fmla v15.4s, v11.4s, v0.s[2] \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v1.4s}, [%9] \n"
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"fmla v8.4s, v12.4s, v1.s[0] \n"
"fmla v9.4s, v13.4s, v1.s[0] \n"
"ld1 {v12.4s, v13.4s}, [%11], #32 \n"
"fmla v14.4s, v10.4s, v1.s[1] \n"
"fmla v15.4s, v11.4s, v1.s[1] \n"
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"fmla v8.4s, v12.4s, v1.s[2] \n"
"fmla v9.4s, v13.4s, v1.s[2] \n"
"prfm pldl1keep, [%10, #128] \n"
"ld1 {v0.4s}, [%10] \n"
"ld1 {v12.4s, v13.4s}, [%11], #32 \n"
"fmla v14.4s, v10.4s, v0.s[0] \n"
"fmla v15.4s, v11.4s, v0.s[0] \n"
"ld1 {v10.4s, v11.4s}, [%11], #32 \n"
"fmla v8.4s, v12.4s, v0.s[1] \n"
"fmla v9.4s, v13.4s, v0.s[1] \n"
"fmla v14.4s, v10.4s, v0.s[2] \n"
"fmla v15.4s, v11.4s, v0.s[2] \n"
"fadd v8.4s, v8.4s, v14.4s \n"
"fadd v9.4s, v9.4s, v15.4s \n"
"sub %11, %11, #288 \n"
"st1 {v8.s}[0], [%0], #4 \n"
"st1 {v8.s}[1], [%1], #4 \n"
"st1 {v8.s}[2], [%2], #4 \n"
"st1 {v8.s}[3], [%3], #4 \n"
"st1 {v9.s}[0], [%4], #4 \n"
"st1 {v9.s}[1], [%5], #4 \n"
"st1 {v9.s}[2], [%6], #4 \n"
"st1 {v9.s}[3], [%7], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(ktmp) // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"pld [%8, #128] \n"
"vld1.f32 {d0-d1}, [%8] \n"
"vld1.f32 {d24-d27}, [%11 :128]! \n"
"vld1.f32 {d16[0]}, [%0] \n"
"vld1.f32 {d16[1]}, [%1] \n"
"vld1.f32 {d17[0]}, [%2] \n"
"vld1.f32 {d17[1]}, [%3] \n"
"vmul.f32 q14, q10, d0[0] \n"
"vmul.f32 q15, q11, d0[0] \n"
"vld1.f32 {d18[0]}, [%4] \n"
"vld1.f32 {d18[1]}, [%5] \n"
"vld1.f32 {d19[0]}, [%6] \n"
"vld1.f32 {d19[1]}, [%7] \n"
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"vmla.f32 q8, q12, d0[1] \n"
"vmla.f32 q9, q13, d0[1] \n"
"vld1.f32 {d24-d27}, [%11 :128]! \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[0] \n"
"pld [%9, #128] \n"
"vld1.f32 {d2-d3}, [%9] \n"
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"vmla.f32 q8, q12, d2[0] \n"
"vmla.f32 q9, q13, d2[0] \n"
"vld1.f32 {d24-d27}, [%11 :128]! \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q11, d2[1] \n"
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"vmla.f32 q8, q12, d3[0] \n"
"vmla.f32 q9, q13, d3[0] \n"
"pld [%10, #128] \n"
"vld1.f32 {d0-d1}, [%10] \n"
"vld1.f32 {d24-d27}, [%11 :128]! \n"
"vmla.f32 q14, q10, d0[0] \n"
"vmla.f32 q15, q11, d0[0] \n"
"vld1.f32 {d20-d23}, [%11 :128]! \n"
"vmla.f32 q8, q12, d0[1] \n"
"vmla.f32 q9, q13, d0[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q11, d1[0] \n"
"vadd.f32 q8, q8, q14 \n"
"vadd.f32 q9, q9, q15 \n"
"sub %11, %11, #288 \n"
"vst1.f32 {d16[0]}, [%0]! \n"
"vst1.f32 {d16[1]}, [%1]! \n"
"vst1.f32 {d17[0]}, [%2]! \n"
"vst1.f32 {d17[1]}, [%3]! \n"
"vst1.f32 {d18[0]}, [%4]! \n"
"vst1.f32 {d18[1]}, [%5]! \n"
"vst1.f32 {d19[0]}, [%6]! \n"
"vst1.f32 {d19[1]}, [%7]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(ktmp) // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "q0", "q1", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else // __ARM_NEON
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
float sum4 = 0.f;
float sum5 = 0.f;
float sum6 = 0.f;
float sum7 = 0.f;
sum0 += r0[0] * ktmp[0];
sum1 += r0[0] * ktmp[1];
sum2 += r0[0] * ktmp[2];
sum3 += r0[0] * ktmp[3];
sum4 += r0[0] * ktmp[4];
sum5 += r0[0] * ktmp[5];
sum6 += r0[0] * ktmp[6];
sum7 += r0[0] * ktmp[7];
ktmp += 8;
sum0 += r0[1] * ktmp[0];
sum1 += r0[1] * ktmp[1];
sum2 += r0[1] * ktmp[2];
sum3 += r0[1] * ktmp[3];
sum4 += r0[1] * ktmp[4];
sum5 += r0[1] * ktmp[5];
sum6 += r0[1] * ktmp[6];
sum7 += r0[1] * ktmp[7];
ktmp += 8;
sum0 += r0[2] * ktmp[0];
sum1 += r0[2] * ktmp[1];
sum2 += r0[2] * ktmp[2];
sum3 += r0[2] * ktmp[3];
sum4 += r0[2] * ktmp[4];
sum5 += r0[2] * ktmp[5];
sum6 += r0[2] * ktmp[6];
sum7 += r0[2] * ktmp[7];
ktmp += 8;
sum0 += r1[0] * ktmp[0];
sum1 += r1[0] * ktmp[1];
sum2 += r1[0] * ktmp[2];
sum3 += r1[0] * ktmp[3];
sum4 += r1[0] * ktmp[4];
sum5 += r1[0] * ktmp[5];
sum6 += r1[0] * ktmp[6];
sum7 += r1[0] * ktmp[7];
ktmp += 8;
sum0 += r1[1] * ktmp[0];
sum1 += r1[1] * ktmp[1];
sum2 += r1[1] * ktmp[2];
sum3 += r1[1] * ktmp[3];
sum4 += r1[1] * ktmp[4];
sum5 += r1[1] * ktmp[5];
sum6 += r1[1] * ktmp[6];
sum7 += r1[1] * ktmp[7];
ktmp += 8;
sum0 += r1[2] * ktmp[0];
sum1 += r1[2] * ktmp[1];
sum2 += r1[2] * ktmp[2];
sum3 += r1[2] * ktmp[3];
sum4 += r1[2] * ktmp[4];
sum5 += r1[2] * ktmp[5];
sum6 += r1[2] * ktmp[6];
sum7 += r1[2] * ktmp[7];
ktmp += 8;
sum0 += r2[0] * ktmp[0];
sum1 += r2[0] * ktmp[1];
sum2 += r2[0] * ktmp[2];
sum3 += r2[0] * ktmp[3];
sum4 += r2[0] * ktmp[4];
sum5 += r2[0] * ktmp[5];
sum6 += r2[0] * ktmp[6];
sum7 += r2[0] * ktmp[7];
ktmp += 8;
sum0 += r2[1] * ktmp[0];
sum1 += r2[1] * ktmp[1];
sum2 += r2[1] * ktmp[2];
sum3 += r2[1] * ktmp[3];
sum4 += r2[1] * ktmp[4];
sum5 += r2[1] * ktmp[5];
sum6 += r2[1] * ktmp[6];
sum7 += r2[1] * ktmp[7];
ktmp += 8;
sum0 += r2[2] * ktmp[0];
sum1 += r2[2] * ktmp[1];
sum2 += r2[2] * ktmp[2];
sum3 += r2[2] * ktmp[3];
sum4 += r2[2] * ktmp[4];
sum5 += r2[2] * ktmp[5];
sum6 += r2[2] * ktmp[6];
sum7 += r2[2] * ktmp[7];
ktmp += 8;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
ktmp -= 8*9;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
ktmp += 8*9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* ktmp = _kernel.channel(p/8 + p%8);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* k0 = ktmp;
const float* k1 = ktmp + 3;
const float* k2 = ktmp + 6;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k3456 = vld1q_f32(k1);
float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmla v0.4s, v2.4s, %10.s[0] \n"
"fmul v10.4s, v3.4s, %10.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmul v11.4s, v1.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v2.4s, v3.4s}, [%3], #32 \n"
"fmla v0.4s, v2.4s, %11.s[0] \n"
"fmla v10.4s, v3.4s, %11.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v2.4s, v3.4s}, [%4], #32 \n"
"fmla v0.4s, v2.4s, %12.s[0] \n"
"fmla v10.4s, v3.4s, %12.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"fadd v0.4s, v0.4s, v10.4s \n"
"fadd v0.4s, v0.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v0.4s}, [%1], #16 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmla.f32 q0, q2, %e10[0] \n"
"vmul.f32 q10, q3, %e10[1] \n"
"pld [%2, #128] \n"
"vld2.f32 {d16-d17}, [%2] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmul.f32 q11, q1, %f10[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vmla.f32 q0, q2, %e11[0] \n"
"vmla.f32 q10, q3, %e11[1] \n"
"pld [%3, #128] \n"
"vld2.f32 {d16-d17}, [%3] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f11[0] \n"
"pld [%4, #256] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vmla.f32 q0, q2, %e12[0] \n"
"vmla.f32 q10, q3, %e12[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d16-d17}, [%4] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f12[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vadd.f32 q0, q0, q10 \n"
"vadd.f32 q0, q0, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * ktmp[0];
sum += r0[1] * ktmp[1];
sum += r0[2] * ktmp[2];
sum += r1[0] * ktmp[3];
sum += r1[1] * ktmp[4];
sum += r1[2] * ktmp[5];
sum += r2[0] * ktmp[6];
sum += r2[1] * ktmp[7];
sum += r2[2] * ktmp[8];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
ktmp += 9;
}
}
//////////////////BN RELU///////////////////////////
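// same folded batchnorm (y = b*x + a) + ReLU epilogue as in the function
// above, applied per output channel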
{
int size = top_blob.w * top_blob.h;
const float *a_data_ptr = a_data;
const float *b_data_ptr = b_data;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < top_blob.c; q++) {
{
float *ptr = top_blob.channel(q);
float a = a_data_ptr[q];
float b = b_data_ptr[q];
#if __ARM_NEON
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"dup v1.4s, %w4 \n"
"dup v2.4s, %w5 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"orr v3.16b, v1.16b, v1.16b \n"
"fmla v3.4s, v0.4s, v2.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v3.4s}, [%1], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"r"(a), // %4
"r"(b) // %5
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
asm volatile(
"vdup.f32 q1, %4 \n"
"vdup.f32 q2, %5 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vorr.32 q3, q1, q1 \n"
"vmla.f32 q3, q0, q2 \n"
"subs %0, #1 \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"r"(a), // %4
"r"(b) // %5
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
ptr = top_blob.channel(q);
#if __ARM_NEON
nn = size >> 2;
remain = size - (nn << 2);
#else
remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
float32x4_t _zero = vdupq_n_f32(0.f);
for (; nn>0; nn--)
{
float32x4_t _p = vld1q_f32(ptr);
_p = vmaxq_f32(_p, _zero);
vst1q_f32(ptr, _p);
ptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"veor q1, q0, q0 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vmax.f32 q0, q0, q1 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr)
: "cc", "memory", "q0", "q1"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--) {
*ptr = b * *ptr + a;
*ptr = std::max(*ptr, 0.f);
ptr++;
}
}
}
}
}
|
DRB059-lastprivate-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Using lastprivate() to resolve an output dependence.
Semantics of lastprivate (x):
causes the corresponding original list item to be updated after the end of the region.
The compiler/runtime copies the local value back to the shared one within the last iteration.
*/
#include <stdio.h>
void foo()
{
int i, x;
#pragma cetus private(i)
#pragma cetus lastprivate(x)
#pragma loop name foo#0
#pragma cetus parallel
#pragma omp parallel for private(i) lastprivate(x)
for (i=0; i<100; i ++ )
{
x=i;
}
printf("x=%d", x);
return ;
}
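/*
Illustrative contrast (not part of DataRaceBench): the same loop without
lastprivate(x). Here x stays shared, every iteration writes it concurrently,
and the value printed afterwards depends on thread scheduling; this is the
output dependence that lastprivate(x) above resolves deterministically
(x ends up as 99, the value from the last iteration).
*/
void foo_racy()
{
int i, x = 0;
#pragma omp parallel for private(i)
for (i=0; i<100; i ++ )
{
x=i; /* unsynchronized write to shared x: a data race */
}
printf("x=%d\n", x);
}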
int main()
{
int _ret_val_0;
foo();
_ret_val_0=0;
return _ret_val_0;
}
|
GB_binop__gt_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_int8)
// A*D function (colscale): GB (_AxD__gt_int8)
// D*A function (rowscale): GB (_DxB__gt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_int8)
// C=scalar+B GB (_bind1st__gt_int8)
// C=scalar+B' GB (_bind1st_tran__gt_int8)
// C=A+scalar GB (_bind2nd__gt_int8)
// C=A'+scalar GB (_bind2nd_tran__gt_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_INT8 || GxB_NO_GT_INT8)
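//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated file): how the macros above
// are consumed by the shared templates. A fully dense eWise loop expands
// roughly to the plain C below (Ax, Bx, Cx, and anz are stand-in names):
//
//      for (int64_t p = 0 ; p < anz ; p++)
//      {
//          int8_t aij = Ax [p] ;       // GB_GETA
//          int8_t bij = Bx [p] ;       // GB_GETB
//          Cx [p] = (aij > bij) ;      // GB_BINOP: z = (x > y)
//      }
//------------------------------------------------------------------------------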
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__gt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__gt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__gt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
const OrientationType orientation,ExceptionInfo *exception)
{
Image
*orient_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
orient_image=(Image *) NULL;
switch(orientation)
{
case UndefinedOrientation:
case TopLeftOrientation:
default:
{
orient_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case TopRightOrientation:
{
orient_image=FlopImage(image,exception);
break;
}
case BottomRightOrientation:
{
orient_image=RotateImage(image,180.0,exception);
break;
}
case BottomLeftOrientation:
{
orient_image=FlipImage(image,exception);
break;
}
case LeftTopOrientation:
{
orient_image=TransposeImage(image,exception);
break;
}
case RightTopOrientation:
{
orient_image=RotateImage(image,90.0,exception);
break;
}
case RightBottomOrientation:
{
orient_image=TransverseImage(image,exception);
break;
}
case LeftBottomOrientation:
{
orient_image=RotateImage(image,270.0,exception);
break;
}
}
if (orient_image != (Image *) NULL)
orient_image->orientation=TopLeftOrientation;
return(orient_image);
}
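/*
  Usage sketch (not part of MagickCore): read an image and normalize its EXIF
  orientation.  Guarded with #if 0 because ReadImage() is declared in
  constitute.h, which this file does not include; "photo.jpg" is a
  placeholder path.
*/
#if 0
static Image *ReadAutoOrientedImage(const char *path,ExceptionInfo *exception)
{
  Image
    *image,
    *orient_image;

  ImageInfo
    *image_info;

  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,path,MagickPathExtent);
  image=ReadImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  orient_image=AutoOrientImage(image,image->orientation,exception);
  image=DestroyImage(image);
  return(orient_image);  /* orientation is now TopLeftOrientation */
}
#endif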
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Extract chop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,chop_image,extent.y,1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(chop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(chop_image,channel,p[i],q);
}
q+=GetPixelChannels(chop_image);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
    Extract chop image (rows below the chop region).
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(chop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(chop_image,channel,p[i],q);
}
q+=GetPixelChannels(chop_image);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
if (status == MagickFalse)
chop_image=DestroyImage(chop_image);
return(chop_image);
}
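/*
  Usage sketch (editor's example, not part of MagickCore): remove a 20x10
  band whose upper-left corner is at (5,5).  Assumes a valid image and
  exception block, as in the methods above.
*/
static Image *ChopExample(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    chop_info;
  chop_info.width=20;
  chop_info.height=10;
  chop_info.x=5;
  chop_info.y=5;
  return(ChopImage(image,&chop_info,exception));
}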
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
ssize_t
j;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cmyk_images=NewImageList();
for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
{
ssize_t
i;
assert(images != (Image *) NULL);
cmyk_image=CloneImage(images,0,0,MagickTrue,
exception);
if (cmyk_image == (Image *) NULL)
break;
if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
for (i=0; i < 4; i++)
{
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
Quantum
pixel;
pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
switch (i)
{
case 0: SetPixelCyan(cmyk_image,pixel,q); break;
case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
case 2: SetPixelYellow(cmyk_image,pixel,q); break;
case 3: SetPixelBlack(cmyk_image,pixel,q); break;
default: break;
}
p+=GetPixelChannels(images);
q+=GetPixelChannels(cmyk_image);
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
AppendImageToList(&cmyk_images,cmyk_image);
}
return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"
CacheView
*crop_view,
*image_view;
Image
*crop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
offset;
RectangleInfo
bounding_box,
page;
ssize_t
y;
/*
Check crop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
bounding_box=image->page;
if ((bounding_box.width == 0) || (bounding_box.height == 0))
{
bounding_box.width=image->columns;
bounding_box.height=image->rows;
}
page=(*geometry);
if (page.width == 0)
page.width=bounding_box.width;
if (page.height == 0)
page.height=bounding_box.height;
if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
((bounding_box.y-page.y) >= (ssize_t) page.height) ||
((page.x-bounding_box.x) > (ssize_t) image->columns) ||
((page.y-bounding_box.y) > (ssize_t) image->rows))
{
/*
Crop is not within virtual canvas, return 1 pixel transparent image.
*/
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.alpha_trait=BlendPixelTrait;
crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
(void) SetImageBackgroundColor(crop_image,exception);
crop_image->page=bounding_box;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
if (crop_image->dispose == BackgroundDispose)
crop_image->dispose=NoneDispose;
return(crop_image);
}
if ((page.x < 0) && (bounding_box.x >= 0))
{
page.width+=page.x-bounding_box.x;
page.x=0;
}
else
{
page.width-=bounding_box.x-page.x;
page.x-=bounding_box.x;
if (page.x < 0)
page.x=0;
}
if ((page.y < 0) && (bounding_box.y >= 0))
{
page.height+=page.y-bounding_box.y;
page.y=0;
}
else
{
page.height-=bounding_box.y-page.y;
page.y-=bounding_box.y;
if (page.y < 0)
page.y=0;
}
if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
page.width=image->columns-page.x;
if ((geometry->width != 0) && (page.width > geometry->width))
page.width=geometry->width;
if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
page.height=image->rows-page.y;
if ((geometry->height != 0) && (page.height > geometry->height))
page.height=geometry->height;
bounding_box.x+=page.x;
bounding_box.y+=page.y;
if ((page.width == 0) || (page.height == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return((Image *) NULL);
}
/*
Initialize crop image attributes.
*/
crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->page.width=image->page.width;
crop_image->page.height=image->page.height;
offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
if ((offset.x > (ssize_t) image->page.width) ||
(offset.y > (ssize_t) image->page.height))
{
crop_image->page.width=bounding_box.width;
crop_image->page.height=bounding_box.height;
}
crop_image->page.x=bounding_box.x;
crop_image->page.y=bounding_box.y;
/*
Crop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
for (y=0; y < (ssize_t) crop_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
1,exception);
q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) crop_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(crop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(crop_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(crop_image);
}
if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
crop_view=DestroyCacheView(crop_view);
image_view=DestroyCacheView(image_view);
crop_image->type=image->type;
if (status == MagickFalse)
crop_image=DestroyImage(crop_image);
return(crop_image);
}
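/*
  Usage sketch (editor's example, not part of MagickCore): extract a 100x100
  region at offset +10+10.  CropImage() works in virtual-canvas coordinates,
  so any page offsets on the image are honored.
*/
static Image *CropExample(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;
  geometry.width=100;
  geometry.height=100;
  geometry.x=10;
  geometry.y=10;
  return(CropImage(image,&geometry,exception));
}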
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image into a list of tiles.  The result
% may be a single sub-region of the image.  All the normal geometry flags
% for Crop are applied.
%
% The format of the CropImageToTiles method is:
%
% Image *CropImageToTiles(const Image *image,const char *crop_geometry,
%   ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t PixelRoundOffset(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(CastDoubleToLong(floor(x)));
return(CastDoubleToLong(ceil(x)));
}
MagickExport Image *CropImageToTiles(const Image *image,
const char *crop_geometry,ExceptionInfo *exception)
{
Image
*next,
*crop_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
if ((flags & AreaValue) != 0)
{
PointInfo
delta,
offset;
RectangleInfo
crop;
size_t
height,
width;
/*
Crop into NxM tiles (@ flag).
*/
crop_image=NewImageList();
width=image->columns;
height=image->rows;
if (geometry.width == 0)
geometry.width=1;
if (geometry.height == 0)
geometry.height=1;
if ((flags & AspectValue) == 0)
{
width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
else
{
width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
delta.x=(double) width/geometry.width;
delta.y=(double) height/geometry.height;
if (delta.x < 1.0)
delta.x=1.0;
if (delta.y < 1.0)
delta.y=1.0;
for (offset.y=0; offset.y < (double) height; )
{
if ((flags & AspectValue) == 0)
{
crop.y=PixelRoundOffset((double) (offset.y-
(geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y; /* increment now to find height */
crop.height=(size_t) PixelRoundOffset((double) (offset.y+
(geometry.y < 0 ? 0 : geometry.y)));
}
else
{
crop.y=PixelRoundOffset((double) (offset.y-
(geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y; /* increment now to find height */
crop.height=(size_t) PixelRoundOffset((double)
(offset.y+(geometry.y < -1 ? geometry.y : 0)));
}
crop.height-=crop.y;
crop.y+=image->page.y;
for (offset.x=0; offset.x < (double) width; )
{
if ((flags & AspectValue) == 0)
{
crop.x=PixelRoundOffset((double) (offset.x-
(geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x; /* increment now to find width */
crop.width=(size_t) PixelRoundOffset((double) (offset.x+
(geometry.x < 0 ? 0 : geometry.x)));
}
else
{
crop.x=PixelRoundOffset((double) (offset.x-
(geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x; /* increment now to find width */
crop.width=(size_t) PixelRoundOffset((double) (offset.x+
(geometry.x < 0 ? geometry.x : 0)));
}
crop.width-=crop.x;
crop.x+=image->page.x;
next=CropImage(image,&crop,exception);
if (next != (Image *) NULL)
AppendImageToList(&crop_image,next);
}
}
ClearMagickException(exception);
return(crop_image);
}
if (((geometry.width == 0) && (geometry.height == 0)) ||
((flags & XValue) != 0) || ((flags & YValue) != 0))
{
/*
Crop a single region at +X+Y.
*/
crop_image=CropImage(image,&geometry,exception);
if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
{
crop_image->page.width=geometry.width;
crop_image->page.height=geometry.height;
crop_image->page.x-=geometry.x;
crop_image->page.y-=geometry.y;
}
return(crop_image);
}
if ((image->columns > geometry.width) || (image->rows > geometry.height))
{
RectangleInfo
page;
size_t
height,
width;
ssize_t
x,
y;
/*
Crop into tiles of fixed size WxH.
*/
page=image->page;
if (page.width == 0)
page.width=image->columns;
if (page.height == 0)
page.height=image->rows;
width=geometry.width;
if (width == 0)
width=page.width;
height=geometry.height;
if (height == 0)
height=page.height;
next=(Image *) NULL;
crop_image=NewImageList();
for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
{
for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
{
geometry.width=width;
geometry.height=height;
geometry.x=x;
geometry.y=y;
next=CropImage(image,&geometry,exception);
if (next == (Image *) NULL)
break;
AppendImageToList(&crop_image,next);
}
if (next == (Image *) NULL)
break;
}
return(crop_image);
}
return(CloneImage(image,0,0,MagickTrue,exception));
}
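/*
  Usage sketch (editor's example, not part of MagickCore): the geometry
  string "3x2@" cuts the image into a 3-column by 2-row grid of tiles, while
  a plain "120x120" cuts fixed-size tiles.  The returned list must
  eventually be destroyed with DestroyImageList().
*/
static Image *TileExample(const Image *image,ExceptionInfo *exception)
{
  return(CropImageToTiles(image,"3x2@",exception));
}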
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to excerpt with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"
CacheView
*excerpt_view,
*image_view;
Image
*excerpt_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate excerpt image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (excerpt_image == (Image *) NULL)
return((Image *) NULL);
/*
Excerpt each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
for (y=0; y < (ssize_t) excerpt_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) excerpt_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
if ((traits == UndefinedPixelTrait) ||
(excerpt_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(excerpt_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(excerpt_image);
}
if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
excerpt_view=DestroyCacheView(excerpt_view);
image_view=DestroyCacheView(image_view);
excerpt_image->type=image->type;
if (status == MagickFalse)
excerpt_image=DestroyImage(excerpt_image);
return(excerpt_image);
}
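/*
  Usage sketch (editor's example, not part of MagickCore): ExcerptImage() is
  a simpler alternative to CropImage(); it copies the requested region
  verbatim with no virtual-canvas arithmetic.
*/
static Image *ExcerptExample(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;
  geometry.width=64;
  geometry.height=64;
  geometry.x=0;
  geometry.y=0;
  return(ExcerptImage(image,&geometry,exception));
}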
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
Image
*extent_image;
MagickBooleanType
status;
/*
Allocate extent image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (extent_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageBackgroundColor(extent_image,exception);
if (status == MagickFalse)
{
extent_image=DestroyImage(extent_image);
return((Image *) NULL);
}
status=CompositeImage(extent_image,image,image->compose,MagickTrue,
-geometry->x,-geometry->y,exception);
if (status != MagickFalse)
Update8BIMClipPath(extent_image,image->columns,image->rows,geometry);
return(extent_image);
}
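/*
  Usage sketch (editor's example, not part of MagickCore): pad an image out
  to 200x200 with the background color.  The offsets are negated inside
  ExtentImage(), so x=y=(-8) places the original 8 pixels in from the top
  and left edges.
*/
static Image *ExtentExample(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;
  geometry.width=200;
  geometry.height=200;
  geometry.x=(-8);
  geometry.y=(-8);
  return(ExtentImage(image,&geometry,exception));
}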
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
CacheView
*flip_view,
*image_view;
Image
*flip_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flip_image=CloneImage(image,0,0,MagickTrue,exception);
if (flip_image == (Image *) NULL)
return((Image *) NULL);
/*
Flip image.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
for (y=0; y < (ssize_t) flip_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
1),flip_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) flip_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
if ((traits == UndefinedPixelTrait) ||
(flip_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(flip_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(flip_image);
}
if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flip_view=DestroyCacheView(flip_view);
image_view=DestroyCacheView(image_view);
flip_image->type=image->type;
if (page.height != 0)
page.y=(ssize_t) (page.height-flip_image->rows-page.y);
flip_image->page=page;
if (status == MagickFalse)
flip_image=DestroyImage(flip_image);
return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
CacheView
*flop_view,
*image_view;
Image
*flop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flop_image=CloneImage(image,0,0,MagickTrue,exception);
if (flop_image == (Image *) NULL)
return((Image *) NULL);
/*
Flop each row.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
for (y=0; y < (ssize_t) flop_image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(flop_image)*flop_image->columns;
for (x=0; x < (ssize_t) flop_image->columns; x++)
{
ssize_t
i;
q-=GetPixelChannels(flop_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
if ((traits == UndefinedPixelTrait) ||
(flop_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(flop_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flop_view=DestroyCacheView(flop_view);
image_view=DestroyCacheView(image_view);
flop_image->type=image->type;
if (page.width != 0)
page.x=(ssize_t) (page.width-flop_image->columns-page.x);
flop_image->page=page;
if (status == MagickFalse)
flop_image=DestroyImage(flop_image);
return(flop_image);
}
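/*
  Usage sketch (editor's example, not part of MagickCore): a flip followed
  by a flop is equivalent to a 180-degree rotation.
*/
static Image *Rotate180Example(const Image *image,ExceptionInfo *exception)
{
  Image
    *flip_image,
    *rotate_image;
  flip_image=FlipImage(image,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  rotate_image=FlopImage(flip_image,exception);
  flip_image=DestroyImage(flip_image);
  return(rotate_image);
}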
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CopyImageRegion(Image *destination,
  const Image *source,const size_t columns,const size_t rows,const ssize_t sx,
  const ssize_t sy,const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
ssize_t
y;
if (columns == 0)
return(MagickTrue);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source,exception);
destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,destination,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/*
Transfer scanline.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
{
PixelChannel channel = GetPixelChannelChannel(source,i);
PixelTrait source_traits=GetPixelChannelTraits(source,channel);
PixelTrait destination_traits=GetPixelChannelTraits(destination,
channel);
if ((source_traits == UndefinedPixelTrait) ||
(destination_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(destination,channel,p[i],q);
}
p+=GetPixelChannels(source);
q+=GetPixelChannels(destination);
}
sync=SyncCacheViewAuthenticPixels(destination_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
Image
*roll_image;
MagickStatusType
status;
RectangleInfo
offset;
/*
Initialize roll image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
roll_image=CloneImage(image,0,0,MagickTrue,exception);
if (roll_image == (Image *) NULL)
return((Image *) NULL);
offset.x=x_offset;
offset.y=y_offset;
while (offset.x < 0)
offset.x+=(ssize_t) image->columns;
while (offset.x >= (ssize_t) image->columns)
offset.x-=(ssize_t) image->columns;
while (offset.y < 0)
offset.y+=(ssize_t) image->rows;
while (offset.y >= (ssize_t) image->rows)
offset.y-=(ssize_t) image->rows;
/*
Roll image.
*/
status=CopyImageRegion(roll_image,image,(size_t) offset.x,
(size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
offset.y,0,0,exception);
(void) SetImageProgress(image,RollImageTag,0,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
(size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
exception);
(void) SetImageProgress(image,RollImageTag,1,3);
status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,2,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
offset.y,0,0,offset.x,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,3,3);
roll_image->type=image->type;
if (status == MagickFalse)
roll_image=DestroyImage(roll_image);
return(roll_image);
}
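/*
  Usage sketch (editor's example, not part of MagickCore): offsets wrap
  around, so rolling by half the width and height swaps the image quadrants;
  negative offsets roll in the opposite direction.
*/
static Image *RollExample(const Image *image,ExceptionInfo *exception)
{
  return(RollImage(image,(ssize_t) image->columns/2,(ssize_t) image->rows/2,
    exception));
}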
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
const RectangleInfo *shave_info,ExceptionInfo *exception)
{
Image
*shave_image;
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (((2*shave_info->width) >= image->columns) ||
((2*shave_info->height) >= image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
SetGeometry(image,&geometry);
geometry.width-=2*shave_info->width;
geometry.height-=2*shave_info->height;
geometry.x=(ssize_t) shave_info->width+image->page.x;
geometry.y=(ssize_t) shave_info->height+image->page.y;
shave_image=CropImage(image,&geometry,exception);
if (shave_image == (Image *) NULL)
return((Image *) NULL);
shave_image->page.width-=2*shave_info->width;
shave_image->page.height-=2*shave_info->height;
shave_image->page.x-=(ssize_t) shave_info->width;
shave_image->page.y-=(ssize_t) shave_info->height;
return(shave_image);
}
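/*
  Usage sketch (editor's example, not part of MagickCore): shave a 2-pixel
  border from every edge; the result is 4 pixels narrower and 4 pixels
  shorter than the original.  Only the width and height members are used.
*/
static Image *ShaveExample(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    shave_info;
  shave_info.width=2;
  shave_info.height=2;
  shave_info.x=0;
  shave_info.y=0;
  return(ShaveImage(image,&shave_info,exception));
}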
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
columns,
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
{
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
(IsGrayColorspace(splice_image->colorspace) != MagickFalse))
(void) SetImageColorspace(splice_image,sRGBColorspace,exception);
if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
(splice_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
(void) SetImageBackgroundColor(splice_image,exception);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
    if ((y < 0) || (y >= (ssize_t) splice_image->rows))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
splice_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
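/*
  Usage sketch (editor's example, not part of MagickCore): splice a 10-row
  band of background color into the image at row 50; the result is 10 rows
  taller than the original.
*/
static Image *SpliceExample(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;
  geometry.width=0;
  geometry.height=10;
  geometry.x=0;
  geometry.y=50;
  return(SpliceImage(image,&geometry,exception));
}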
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.  If the
% input image is part of a larger list, all other images in that list are
% simply 'lost', not destroyed.
%
% Also, if the crop generates a list of images, only the first image is
% resized.  Finally, if the crop succeeds but the resize fails, you get a
% cropped image as well as a 'false' or 'failed' report.
%
% This function should probably be deprecated in favor of direct calls to
% CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.  The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
Image
*resize_image,
*transform_image;
RectangleInfo
geometry;
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
transform_image=(*image);
if (crop_geometry != (const char *) NULL)
{
Image
*crop_image;
/*
Crop image to a user specified size.
*/
crop_image=CropImageToTiles(*image,crop_geometry,exception);
if (crop_image == (Image *) NULL)
transform_image=CloneImage(*image,0,0,MagickTrue,exception);
else
{
transform_image=DestroyImage(transform_image);
transform_image=GetFirstImageInList(crop_image);
}
*image=transform_image;
}
if (image_geometry == (const char *) NULL)
return(MagickTrue);
/*
Scale image to a user specified size.
*/
(void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
exception);
if ((transform_image->columns == geometry.width) &&
(transform_image->rows == geometry.height))
return(MagickTrue);
resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
transform_image->filter,exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
transform_image=DestroyImage(transform_image);
transform_image=resize_image;
*image=transform_image;
return(MagickTrue);
}
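/*
  Usage sketch (editor's example, not part of MagickCore): crop to a 256x256
  region at +0+0, then resize the result to 128x128.  Note the image handle
  is replaced in place, per the caveats above.
*/
static MagickBooleanType TransformExample(Image **image,
  ExceptionInfo *exception)
{
  return(TransformImage(image,"256x256+0+0","128x128",exception));
}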
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transpose_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transpose_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(transpose_image);
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
0,1,transverse_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(transverse_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
q-=GetPixelChannels(transverse_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transverse_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transverse_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
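/*
  Usage sketch (editor's example, not part of MagickCore): transpose mirrors
  across the main diagonal and transverse across the anti-diagonal, so
  applying one after the other yields a 180-degree rotation.
*/
static Image *TransposeTransverseExample(const Image *image,
  ExceptionInfo *exception)
{
  Image
    *result,
    *transpose_image;
  transpose_image=TransposeImage(image,exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  result=TransverseImage(transpose_image,exception);
  transpose_image=DestroyImage(transpose_image);
  return(result);
}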
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
Image
*trim_image;
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
geometry=GetImageBoundingBox(image,exception);
if ((geometry.width == 0) || (geometry.height == 0))
{
Image
*crop_image;
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.alpha_trait=BlendPixelTrait;
crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
(void) SetImageBackgroundColor(crop_image,exception);
crop_image->page=image->page;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
return(crop_image);
}
geometry.x+=image->page.x;
geometry.y+=image->page.y;
trim_image=CropImage(image,&geometry,exception);
if (trim_image != (Image *) NULL)
Update8BIMClipPath(trim_image,image->columns,image->rows,&geometry);
return(trim_image);
}
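/*
  Usage sketch (editor's example, not part of MagickCore): trim the uniform
  border computed by GetImageBoundingBox(), then zero the page offsets so
  the result stands alone (the equivalent of a +repage).
*/
static Image *TrimExample(const Image *image,ExceptionInfo *exception)
{
  Image
    *trim_image;
  trim_image=TrimImage(image,exception);
  if (trim_image != (Image *) NULL)
    {
      trim_image->page.x=0;
      trim_image->page.y=0;
    }
  return(trim_image);
}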
|
3.c | /* The Computer Language Benchmarks Game
* https://salsa.debian.org/benchmarksgame-team/benchmarksgame/
*
* Contributed by Mr Ledrug
*
* Algorithm lifted from Intel Fortran #2 code by Steve Decker et al.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
/* Denominator of entry (i,j); the matrix element is 1/A(i,j). */
static inline int A(int i, int j) { return ((i + j) * (i + j + 1) / 2 + i + 1); }
double dot(double *v, double *u, int n) {
int i;
double sum = 0;
for (i = 0; i < n; i++)
sum += v[i] * u[i];
return sum;
}
void mult_Av(double *v, double *out, const int n) {
int i, j;
double sum;
#pragma omp parallel for private(sum, j)
for (i = 0; i < n; i++) {
for (sum = j = 0; j < n; j++)
sum += v[j] / A(i, j);
out[i] = sum;
}
}
void mult_Atv(double *v, double *out, const int n) {
int i, j;
double sum;
#pragma omp parallel for private(sum, j)
for (i = 0; i < n; i++) {
for (sum = j = 0; j < n; j++)
sum += v[j] / A(j, i);
out[i] = sum;
}
}
double *tmp;
void mult_AtAv(double *v, double *out, const int n) {
mult_Av(v, tmp, n);
mult_Atv(tmp, out, n);
}
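/*
 * mult_AtAv applies the operator B = At*A.  The loop in main() below is a
 * power iteration: repeatedly applying B to u converges toward the dominant
 * eigenvector of B, so sqrt((u.v)/(v.v)) approximates the largest singular
 * value of A, i.e. the spectral norm that is finally printed.
 */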
int main(int argc, char **argv) {
  int n = argc > 1 ? atoi(argv[1]) : 0; /* guard against a missing argument */
  if (n <= 0)
    n = 2000;
double *u, *v;
u = malloc(n * sizeof(double));
v = malloc(n * sizeof(double));
tmp = malloc(n * sizeof(double));
int i;
for (i = 0; i < n; i++)
u[i] = 1;
for (i = 0; i < 10; i++) {
mult_AtAv(u, v, n);
mult_AtAv(v, u, n);
}
printf("%.9f\n", sqrt(dot(u, v, n) / dot(v, v, n)));
return 0;
} |
DRB084-threadprivatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable is used within a function called by a parallel region.
threadprivate is not used, so concurrent updates to sum0 race.
Data race pairs sum0@61:3 vs. sum0@61:8
sum0@61:3 vs. sum0@61:3
*/
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
//#pragma omp threadprivate(sum0)
void foo (int i)
{
sum0=sum0+i;
}
int main()
{
int i, sum=0;
#pragma omp parallel for
for (i=1;i<=1000;i++)
{
foo (i);
}
sum= sum+sum0;
/* reference calculation */
for (i=1;i<=1000;i++)
{
sum1=sum1+i;
}
printf("sum=%d; sum1=%d\n",sum,sum1);
// assert(sum==sum1);
return 0;
}
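/* A race-free variant, sketched on the assumption that it mirrors the
 * matching DataRaceBench "-no" test (hypothetical, not part of this file):
 * enable the threadprivate directive above so each thread owns a copy of
 * sum0, then combine the per-thread copies under a critical section:
 *
 *   #pragma omp threadprivate(sum0)
 *   ...
 *   #pragma omp parallel copyin(sum0)
 *   {
 *   #pragma omp for
 *     for (i=1;i<=1000;i++) foo(i);
 *   #pragma omp critical
 *     sum = sum + sum0;
 *   }
 */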
|
partial.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] -= hypre_MPI_Wtime();
#endif
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
/*HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;*/
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
/*HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter, coarse_counter_offd; */
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
/*HYPRE_Int strong_f_marker = -2;*/
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i;
/*HYPRE_Int i, ii, i1, i2, j, jj, kk, k1, jj1;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Int max_num_threads;
HYPRE_Int *P_diag_array = NULL;
HYPRE_Int *P_offd_array = NULL;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
max_num_threads = hypre_NumThreads();
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
/*P_marker = hypre_CTAlloc(HYPRE_Int, n_fine); */
}
if (full_off_procNodes)
{
/*P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);*/
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/*hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);*/
for (i=0; i < full_off_procNodes; i++)
{
fine_to_coarse_offd[i] = -1;
tmp_CF_marker_offd[i] = -1;
}
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
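/* Worked example of the two mappings built above (a small hypothetical
 * CF_marker, not from the source): with CF_marker = {1,-2,-1,1},
 * fine_to_coarse = {0,-1,-1,1} and old_coarse_to_fine = {0,1,3}; the old
 * coarse grid consists of the kept C points (1) plus the F points being
 * re-interpolated (-2), while -1 points drop out. */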
P_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST);
P_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads+1, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, diagonal, distribute, sgn, sum)
#endif
{
HYPRE_Int ii, jj_counter, jj_counter_offd, jj, kk, i1, i2, k1, jj1;
HYPRE_BigInt big_k1;
HYPRE_Int loc_col, jj_begin_row, jj_begin_row_offd;
HYPRE_Int jj_end_row, jj_end_row_offd, strong_f_marker;
HYPRE_Int size, rest, ne, ns;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
strong_f_marker = -2;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
size = n_coarse_old/num_threads;
rest = n_coarse_old - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size+1);
ne = (my_thread_num+1)*(size+1);
}
else
{
ns = my_thread_num*size+rest;
ne = (my_thread_num+1)*size+rest;
}
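/* Worked example of the static block partition (hypothetical sizes): with
 * n_coarse_old = 10 and num_threads = 4, size = 2 and rest = 2, so the
 * threads own rows [0,3), [3,6), [6,8), [8,10) -- the first `rest`
 * threads take size+1 rows each. */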
if (n_fine) P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (ii=0; ii < n_fine; ii++)
P_marker[ii] = -1;
if (full_off_procNodes) P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
for (ii=0; ii < full_off_procNodes; ii++)
P_marker_offd[ii] = -1;
/*coarse_counter = 0;
coarse_counter_offd = 0;*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
for (ii = ns; ii < ne; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
/*P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;*/
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
/*coarse_counter++;*/
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
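/* A 1-D sketch of the stencil this pass counts (hypothetical fragment):
 *    C    F    F    C
 *    c1   i    f    c2
 * row i of P draws from c1, a strong C neighbor of i, and from c2,
 * reached through the strong F neighbor f -- the distance-two points
 * that make this the extended ("+i") interpolation stencil. */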
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F point; loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
P_diag_array[my_thread_num] = jj_counter;
P_offd_array[my_thread_num] = jj_counter_offd;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
for (i=0; i < max_num_threads; i++)
{
P_diag_array[i+1] += P_diag_array[i];
P_offd_array[i+1] += P_offd_array[i];
}
P_diag_size = P_diag_array[max_num_threads];
P_offd_size = P_offd_array[max_num_threads];
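/* Worked example of the scan just performed: per-thread counts {4,2,3}
 * in P_diag_array become running totals {4,6,9,9}, so thread t > 0
 * restarts its jj_counter at P_diag_array[t-1] below and the final
 * entry is the total nonzero count. */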
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = P_diag_size;
P_offd_i[n_coarse_old] = P_offd_size;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (my_thread_num)
{
jj_counter = P_diag_array[my_thread_num-1];
jj_counter_offd = P_offd_array[my_thread_num-1];
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = ns; ii < ne; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
P_diag_i[ii] = jj_counter;
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
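/* The loops below assemble the classical interpolation weight (written
 * here for the distance-one part; the extended "+i" variant also routes
 * the i2 == i term back into the diagonal):
 *
 *   w_ij = - ( a_ij + sum_{k in F_i^s} a_ik * a_kj / sum_{m} a_km )
 *            / ( a_ii + sum_{n weak} a_in )
 *
 * "distribute" plays the role of a_ik / sum_m a_km, "diagonal" accumulates
 * the denominator, and the sgn test keeps only entries whose sign opposes
 * the diagonal of row k. */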
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* If i1 is a C point that strongly influences i, accumulate
* a_(i,i1) into its interpolation weight; strong F neighbors are
* distributed below and weak neighbors fold into the diagonal. */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
} /* end parallel region */
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map; col_map should be monotone increasing and contain
* global column numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_array, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PARTIAL_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialStdInterp
* Comment: The interpolation weighting can be changed with the sep_weight
* variable, which controls whether negative and positive
* off-diagonals are separated in the weight formula.
*--------------------------------------------------------------------------*/
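/*
 * In short (summarizing the alfa/beta logic later in this routine): with
 * sep_weight == 1 the negative and positive entries of the modified row
 * ahat are rescaled separately,
 *   alfa = sum_neg/sum_neg_C/diagonal,  beta = sum_pos/sum_pos_C/diagonal,
 *   w_ij = -alfa*ahat_j for ahat_j <= 0,  w_ij = -beta*ahat_j otherwise;
 * with sep_weight != 1 a single factor alfa = sum/sum_C/diagonal is used.
 */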
HYPRE_Int
hypre_BoomerAMGBuildPartialStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int sep_weight,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa = 1.0, beta = 1.0; /* initialized in case a scaling sum is empty */
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, j1, jj, kk, k1;
HYPRE_BigInt big_k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F point; loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Initialize ahat, which is a modification to a, used in the standard
* interpolation routine. */
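/* How the scratch triple works: ahat holds the modified row of A for the
 * current F point, compressed so slots [0, cnt_c) are C-point entries and
 * [cnt_c, cnt_f) are the remaining entries; ihat[col] maps a column to its
 * slot (-1 if unseen) and ipnt inverts that map, so the arrays are reset
 * in O(cnt_f) after each row instead of O(n_fine). */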
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd]=i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] > 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] > 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
cnt_c = 0;
cnt_f = jj_end_row-jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd-jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
ahat[indx] += A_diag_data[jj];
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
ahat[indx] -= A_diag_data[kk]*distribute;
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk]*distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk]*distribute;
}
}
if(num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
ahat_offd[indx] -= A_offd_data[kk]*distribute;
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute;
}
}
}
}
}
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
ahat_offd[indx] += A_offd_data[jj];
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++)
{
big_k1 = A_ext_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
ahat[indx] -= A_ext_data[kk]*distribute;
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk]*distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk]*distribute;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(num_functions == 1 ||
dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
ahat_offd[indx] -= A_ext_data[kk]*distribute;
else if(P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if(sep_weight == 1)
{
for (jj=0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
if (sum_neg_C*diagonal != 0.0) alfa = sum_neg/sum_neg_C/diagonal;
if (sum_pos_C*diagonal != 0.0) beta = sum_pos/sum_pos_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
P_diag_data[jj] = -beta*ahat[j1];
else
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
P_offd_data[jj] = -beta*ahat_offd[j1];
else
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
else
{
for (jj=0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C*diagonal != 0.0) alfa = sum/sum_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag==4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map; col_map should be monotone increasing and contain
* global column numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildPartialExtInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildPartialExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_BigInt *num_old_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *old_coarse_to_fine = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int n_coarse_old;
HYPRE_BigInt total_old_global_cpts;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int cnt, old_cnt;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, ii, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
my_first_cpt = num_cpts_global[0];
/*my_first_old_cpt = num_old_cpts_global[0];*/
n_coarse_old = (HYPRE_Int)(num_old_cpts_global[1] - num_old_cpts_global[0]);
/*n_coarse = num_cpts_global[1] - num_cpts_global[0];*/
if (my_id == (num_procs -1))
{
total_global_cpts = num_cpts_global[1];
total_old_global_cpts = num_old_cpts_global[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
hypre_MPI_Bcast(&total_old_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
if (hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1))
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_coarse_old+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
old_coarse_to_fine = hypre_CTAlloc(HYPRE_Int, n_coarse_old, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
cnt = 0;
old_cnt = 0;
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] = -1;
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt++;
old_coarse_to_fine[old_cnt++] = i;
}
else if (CF_marker[i] == -2)
{
old_coarse_to_fine[old_cnt++] = i;
}
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
P_diag_i[ii] = jj_counter;
if (num_procs > 1)
P_offd_i[ii] = jj_counter_offd;
i = old_coarse_to_fine[ii];
if (CF_marker[i] > 0)
{
jj_counter++;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[ii])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F point; loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[ii])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[ii])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
if(P_marker_offd[i1] < P_offd_i[ii])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[ii])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[ii])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_coarse_old] = jj_counter;
P_offd_i[n_coarse_old] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (ii = 0; ii < n_coarse_old; ii++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
i = old_coarse_to_fine[ii];
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] == -2)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row )
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd &&
(sgn*A_ext_data[jj1]) < 0)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Create the ParCSR matrix P and set its diagonal and off-diagonal parts.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
total_old_global_cpts,
total_global_cpts,
num_old_cpts_global,
num_cpts_global,
0,
P_diag_i[n_coarse_old],
P_offd_i[n_coarse_old]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_coarse_old];
P_offd_size = P_offd_i[n_coarse_old];
}
/* This builds col_map; col_map must be monotonically increasing and contain
* global column numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] < -1) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(old_coarse_to_fine, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
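/* Illustrative sketch (not part of hypre): the final normalization above
 * reduces, per fine row, to dividing each accumulated coupling by the
 * negated diagonal (augmented by weak connections). A minimal standalone
 * version over plain C arrays looks like this: */
static void normalize_interp_row(double *w, int nnz, double diagonal)
{
   int jj;
   if (diagonal != 0.0)
   {
      for (jj = 0; jj < nnz; jj++)
      {
         w[jj] /= -diagonal;   /* P_ij = -(accumulated coupling) / a~_ii */
      }
   }
}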
|
tests.c | #include "tests.h"
#include "linalg.h"
#include "projector.h"
#include "reader.h"
#include "sbt.h"
#include "utils.h"
#include <assert.h>
#include <complex.h>
#include <math.h>
#include <mkl.h>
#include <mkl_types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.14159265359
double Ylmr(int l, int m, double theta, double phi) {
return creal(Ylm(l, m, theta, phi));
}
double Ylmi(int l, int m, double theta, double phi) {
return cimag(Ylm(l, m, theta, phi));
}
double *get_sbtd_ks(sbt_descriptor_t *d) { return d->ks; }
int fft_check(char *wavecar, double *kpt_weights, int *fftg) {
setbuf(stdout, NULL);
pswf_t *wf = read_wavefunctions(wavecar, kpt_weights);
double complex *x = (double complex *)mkl_calloc(fftg[0] * fftg[1] * fftg[2],
sizeof(double complex), 64);
fft3d(x, wf->G_bounds, wf->lattice, wf->kpts[0]->k, wf->kpts[0]->Gs,
wf->kpts[0]->bands[0]->Cs, wf->kpts[0]->bands[0]->num_waves, fftg);
int *Gs = wf->kpts[0]->Gs;
float complex *Cs = wf->kpts[0]->bands[0]->Cs;
double inv_sqrt_vol = pow(determinant(wf->lattice), -0.5);
double total1 = 0;
double total2 = 0;
double total3 = 0;
for (int i = 0; i < fftg[0]; i++) {
for (int j = 0; j < fftg[1]; j++) {
for (int k = 0; k < fftg[2]; k++) {
double f1 = (double)i / fftg[0];
double f2 = (double)j / fftg[1];
double f3 = (double)k / fftg[2];
double complex temp = 0;
for (int w = 0; w < wf->kpts[0]->bands[0]->num_waves; w++) {
temp += Cs[w] * cexp((f1 * (Gs[3 * w]) + f2 * (Gs[3 * w + 1]) +
f3 * (Gs[3 * w + 2])) *
2 * PI * I);
if (i == 0 && j == 0 && k == 0)
total3 += pow(cabs(Cs[w]), 2);
}
temp *= inv_sqrt_vol;
int ind = i * fftg[1] * fftg[2] + j * fftg[2] + k;
total1 += pow(cabs(x[ind]), 2);
total2 += pow(cabs(temp), 2);
if (cabs(x[ind] - temp) > 1e-5)
return -1;
}
}
}
printf("FFTCHECK ASSERTS\n");
float complex *CAs =
(float complex *)calloc(wf->kpts[0]->num_waves, sizeof(float complex));
fwd_fft3d(x, wf->G_bounds, wf->lattice, wf->kpts[0]->k, wf->kpts[0]->Gs, CAs,
wf->kpts[0]->bands[0]->num_waves, fftg);
for (int w = 0; w < wf->kpts[0]->num_waves; w++) {
if (cabs(CAs[w] - wf->kpts[0]->bands[0]->Cs[w]) > 1e-5)
return -2;
}
free(CAs);
mkl_free(x);
return 0;
}
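/* Hypothetical usage sketch: the file name, weight array, and FFT grid
 * below are made-up values, not part of the test suite. fft_check returns
 * 0 on success, -1 on a forward-transform mismatch, and -2 on a
 * round-trip (forward then inverse) mismatch. */
static void example_fft_check(void) {
  double weights[1] = {1.0}; /* assumes the WAVECAR holds a single k-point */
  int fftg[3] = {32, 32, 32}; /* assumed FFT grid dimensions */
  if (fft_check("WAVECAR", weights, fftg) != 0)
    printf("fft_check failed\n");
}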
void proj_check(int BAND_NUM, int KPOINT_NUM, pswf_t *wf, int *fftg,
int *labels, double *coords) {
ppot_t *pps = wf->pps;
double complex *x =
mkl_calloc(fftg[0] * fftg[1] * fftg[2], sizeof(double complex), 64);
// printf("START FT\n");
fft3d(x, wf->G_bounds, wf->lattice, wf->kpts[KPOINT_NUM]->k,
wf->kpts[KPOINT_NUM]->Gs, wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->Cs,
wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->num_waves, fftg);
// printf("FINISH FT\n");
double *lattice = wf->lattice;
double vol = determinant(lattice);
double dv = vol / (fftg[0] * fftg[1] * fftg[2]);
for (int i = 0; i < fftg[0]; i++) {
double frac[3] = {0, 0, 0};
double kdotr = 0;
for (int j = 0; j < fftg[1]; j++) {
for (int k = 0; k < fftg[2]; k++) {
frac[0] = (double)i / fftg[0];
frac[1] = (double)j / fftg[1];
frac[2] = (double)k / fftg[2];
kdotr = dot(wf->kpts[KPOINT_NUM]->k, frac);
x[i * fftg[1] * fftg[2] + j * fftg[2] + k] *= cexp(2 * PI * I * kdotr);
}
}
}
double complex *y = (double complex *)malloc(fftg[0] * fftg[1] * fftg[2] *
sizeof(double complex));
memcpy(y, x, fftg[0] * fftg[1] * fftg[2] * sizeof(double complex));
double err = 0, err2 = 0;
double normx = 0, normy = 0;
int num_sites = wf->num_sites;
#pragma omp parallel for
for (int p = 0; p < num_sites; p++) {
int ind;
double serr = 0, serr2 = 0;
double snormx = 0, snormy = 0;
projection_t pros = wf->kpts[KPOINT_NUM]->bands[BAND_NUM]->projections[p];
// printf("READ PROJECTIONS\n");
ppot_t pp = pps[labels[p]];
double rmax = pp.wave_grid[pp.wave_gridsize - 1];
double res[3] = {0, 0, 0};
vcross(res, lattice + 3, lattice + 6);
int grid1 = (int)(mag(res) * rmax / vol * fftg[0]) + 1;
vcross(res, lattice + 0, lattice + 6);
int grid2 = (int)(mag(res) * rmax / vol * fftg[1]) + 1;
vcross(res, lattice + 0, lattice + 3);
int grid3 = (int)(mag(res) * rmax / vol * fftg[2]) + 1;
int center1 = (int)round(coords[3 * p + 0] * fftg[0]);
int center2 = (int)round(coords[3 * p + 1] * fftg[1]);
int center3 = (int)round(coords[3 * p + 2] * fftg[2]);
// printf("FINISH SETUP %d\n%d %d %d\n%d %d %d\n",p, center1, center2,
// center3, grid1, grid2, grid3);
for (int i = -grid1 + center1; i <= grid1 + center1; i++) {
double frac[3] = {0, 0, 0};
double testcoord[3] = {0, 0, 0};
int ii = 0, jj = 0, kk = 0;
double phasecoord[3] = {0, 0, 0};
double phase = 0;
for (int j = -grid2 + center2; j <= grid2 + center2; j++) {
for (int k = -grid3 + center3; k <= grid3 + center3; k++) {
testcoord[0] = (double)i / fftg[0] - coords[3 * p + 0];
testcoord[1] = (double)j / fftg[1] - coords[3 * p + 1];
testcoord[2] = (double)k / fftg[2] - coords[3 * p + 2];
frac_to_cartesian(testcoord, lattice);
if (mag(testcoord) < rmax) {
ii = (i % fftg[0] + fftg[0]) % fftg[0];
jj = (j % fftg[1] + fftg[1]) % fftg[1];
kk = (k % fftg[2] + fftg[2]) % fftg[2];
frac[0] = (double)ii / fftg[0];
frac[1] = (double)jj / fftg[1];
frac[2] = (double)kk / fftg[2];
phasecoord[0] = coords[3 * p + 0] + ((ii - i) / fftg[0]);
phasecoord[1] = coords[3 * p + 1] + ((jj - j) / fftg[1]);
phasecoord[2] = coords[3 * p + 2] + ((kk - k) / fftg[2]);
phase = dot(phasecoord, wf->kpts[KPOINT_NUM]->k);
ind = ii * fftg[1] * fftg[2] + jj * fftg[2] + kk;
x[ii * fftg[1] * fftg[2] + jj * fftg[2] + kk] = 0;
for (int n = 0; n < pros.total_projs; n++) {
x[ii * fftg[1] * fftg[2] + jj * fftg[2] + kk] +=
wave_value2(pp.wave_grid, pp.funcs[pros.ns[n]].pswave,
pp.funcs[pros.ns[n]].pswave_spline,
pp.wave_gridsize, pros.ls[n], pros.ms[n],
testcoord) *
pros.overlaps[n] * cexp(2 * PI * I * phase);
}
serr += pow(cabs(x[ind] - y[ind]), 2);
serr2 += pow(cabs(x[ind]) - cabs(y[ind]), 2);
snormx += pow(cabs(x[ind]), 2);
snormy += pow(cabs(y[ind]), 2);
}
}
}
}
#pragma omp critical
{
err += serr;
err2 += serr2;
normx += snormx;
normy += snormy;
}
}
printf("err magerr, normx normy %lf %lf %lf %lf\n", err / normy, err2 / normy,
normx, normy);
mkl_free(x);
free(y);
}
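/* Hypothetical companion sketch for proj_check: a single site at the
 * origin with pseudopotential label 0; all values are assumed, not taken
 * from a real calculation. */
static void example_proj_check(pswf_t *wf, int *fftg) {
  int labels[1] = {0};
  double coords[3] = {0.0, 0.0, 0.0};
  proj_check(0, 0, wf, fftg, labels, coords); /* band 0, k-point 0 */
}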
|
shape.h | /*
* shape.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef SHAPE_H_
#define SHAPE_H_
#include <cstring>
#include <cstdio>
#include "../dll.h"
#include "../nd4jmalloc.h"
#include "../templatemath.h"
#include "../helpers/logger.h"
#include "../pointercast.h"
#include "../cnpy/cnpy.h"
#include <op_boilerplate.h>
#define MAX_DIMENSION 0x7fffffff
#define MAX_NUM_THREADS 1024
#define MAX_RANK 32
#define MAX_COORD 3
#define PREALLOC_SIZE 33554432
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/sharedmem.h>
#endif
#ifdef __CUDACC__
#define INLINEDEF inline
#else
#define INLINEDEF inline
#endif
#include "../pairwise_util.h"
#include <stdint.h>
#include <array/ArrayOptions.h>
namespace shape {
/**
* Shape information approximating
* the information on an ndarray
*/
struct ND4J_EXPORT ShapeInformation {
_CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0)
: shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_)
{}
Nd4jLong *shape;
Nd4jLong *stride;
char order;
int rank;
int offset;
int elementWiseStride;
};
/**
* Indexing information
* for bounds checking
*/
struct ND4J_EXPORT CurrentIndexing {
int numElementsPerThread;
int blockStartingIndex;
int startingThreadIndex;
int endingThreadIndex;
};
ND4J_EXPORT _CUDA_HD bool shapeEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2);
ND4J_EXPORT _CUDA_HD bool equalsSoft(Nd4jLong *shapeA, Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD int sizeAt(Nd4jLong *shape, int dim);
template <typename T>
ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length);
ND4J_EXPORT _CUDA_HD void traceNew(int id);
ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength);
ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder);
ND4J_EXPORT _CUDA_HD bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder, Nd4jLong* target);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *buffer);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape, Nd4jLong *output);
//ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer);
#ifdef __CUDACC__
template <typename T>
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager);
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size);
#endif
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret);
ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order);
// check whether the input dimensions are permuted; they are unpermuted when they appear in order 0,...,rank-1
template <typename T>
ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* @param toCopy the shape to copy
* @return a copy of the original struct
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy);
ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(Nd4jLong *shapeBuffer);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return -1 if there is no element wise stride,
* otherwise the element wise stride of reshape(1,length)
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return -1 if there is no element wise stride,
* otherwise the element wise stride of reshape(1,length)
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer);
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange);
/**
* In place permute swap
* @param length
* @param shape
* @param rearrange
*/
ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange);
ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange);
/**
* Rearrange the permute indexes
* according to which dimensions are specified.
*
* For example, dimension is implicitly:
* 0,1,2
*
* If you want to do a reduce along dimensions 0 and 1,
* you need to permute the indexes to be:
* 2,0,1
*
* which will give us the ability to iterate along an element
* wise stride.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength);
/**
* This method does inplace transpose of given shapeBuffer
*
* @param shapeBuffer
*/
ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer);
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride);
/**
* Ensure that every value in the rearrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength);
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank);
/**
* When 1 dimension is the whole length of the
* array
*/
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isRowVector(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank);
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo);
/**
* Returns the shape portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy);
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes);
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
//ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer);
/**
* Returns the length of the
* shape information buffer:
* rank * 2 + 4
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(Nd4jLong* shapeInfo);
/**
* Returns the rank portion of
* an information buffer
*/
ND4J_EXPORT _CUDA_HD int rank( Nd4jLong *buffer);
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer);
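/**
 * Worked example (illustrative only): for a rank-2, c-ordered 2x3 array the
 * full shape info buffer of length rank * 2 + 4 = 8 is
 *   { 2,   2, 3,   3, 1,   0, 1, 99 }
 *     rank shape  stride  offset, element wise stride, order ('c' == 99)
 */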
/**
* Returns the stride portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer);
/**
* Compute the length of the given shape
*/
ND4J_EXPORT _CUDA_HD bool isEmpty(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape);
/***
* Returns the offset portion of an information buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer);
/**
* Returns the ordering
* for this shape information buffer
*/
ND4J_EXPORT _CUDA_HD char order(Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
* relative to a dimension and ordering for a reduction index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength);
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info);
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the indexes array
* @return the new array with the omitted item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the indexes array
* @return the new array with the omitted item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength);
/**
* Iterate over a given set of indexes
* the begin and end indexes are 0 based;
* a padding of 1 is automatically assumed for the ending.
*
* For example if you want to iterate over 0 to 4
* it will go to 4 rather than 3.
*
* indexes should be the indexes to exclude
* indexes length should be the length of indexes
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end);
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
//#ifdef __CUDACC__
// __device__
//#endif
// ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);
/**
* Returns a shape
* buffer that forces the given shape to rank 2 (a vector shape).
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo();
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret);
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment);
/**
* Range between from and to with an
* increment of 1
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to);
/**
* Keep the given indexes
* in the data
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength);
/**
* Generate reverse copy of the data
* @param data
* @param length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length);
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length);
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length);
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths);
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank,
int index,
Nd4jLong *shape,
Nd4jLong *tensorShape,
int tensorShapeLength,
int *dimension,
int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2);
/**
* Computes the tensor along dimension
* offset
* @param index the index to get the offset for the tad for
* @param rank the rank of the shapes and strides
* @param info the shape information to use for tad
* @param dimension the dimensions to use for computing the tensor along dimensions
*/
// ND4J_EXPORT _CUDA_HD int offset(int index,
// int rank,
// shape::ShapeInformation *info,
// Nd4jLong *dimension,
// int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank,
volatile int length,
volatile Nd4jLong *shape,
int *dimension,
int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i);
/**
* Computes the number of tads per block
*
*/
ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads);
// ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension,
// int dimensionLength);
/**
* Returns a shape buffer
* for the shape information metadata.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info);
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret);
/**
* Returns the number of elements per thread
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int numElementsPerThread(int N);
/**
* Returns the block starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int blockStartingIndex(int N);
/**
* Returns the thread starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadStartingIndex(int N, int stride, int offset);
/**
* Returns the thread ending index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadEndingIndex(int N, int stride, int offset);
/**
* Returns indexing information
* for the current kernel invocation
*/
//#ifdef __CUDACC__
// __device__
//#endif
// CurrentIndexing *currentIndex(int N, int offset, int stride);
/** Given a linear index, an element wise stride
* and the length of each tad,
* map a linear index to a tad
* @param i the index to map
* @param elementWiseStride the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad);
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
int tadsForOriginal);
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param numElementsPerTad the number of elements per tad
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the number of tads for the original version of the problem
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
int tadNum, int originalTadNum);
/**
* Returns the prod of the data
* up to the given length
*/
ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length);
ND4J_EXPORT _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length);
/**
* Returns the rear most left over item not present in
* the dimension array. This assumes that the dimension array is sorted.
*
* For example, given a dimension array of:
* 0,2
*
* and
*
* 12,4,2,1 in data
*
* You end up with 1 (data[3])
* since the first item won't match
* the last item of the dimension array
*/
// ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength);
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *indices,int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the prod of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape,int index,int numIndices);
ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank, Nd4jLong *shape,int index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the prod of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape,int index,int numIndices,Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, int index, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the prod of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the prod of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the prod of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert the given index (such as 1,1)
* to a linear index
* @param shape the shape of the indexes to convert
* @param indices the index to convert
* @return the linear index given the shape
* and indices
*/
ND4J_EXPORT _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices);
/**
* Compute the real linear indices for the given shape and stride
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride);
/**
* Compute the real linear indices for the
* given shape buffer. Shape,stride and rank are derived
* from the buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the prod of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo,int index,int numIndices,Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically the prod of the shape)
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo,int index,Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides);
ND4J_EXPORT _CUDA_HD void printIntArray(Nd4jLong *arr,int length);
ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length);
ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);
// ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);
// this function checks the consistency of dimensions with the array rank (negative dimensions, too-large dimensions, too many dimensions)
// it also sorts the input array of dimensions; this is also necessary for creating a TAD object
ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);
// returns the absolute index into the min array (min is a sub-array of max) that corresponds to maxIdx in the max array
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx);
ND4J_EXPORT _CUDA_HD void shapeScalar(Nd4jLong* const buffer);
ND4J_EXPORT _CUDA_HD void shapeVector(const Nd4jLong length, Nd4jLong* const buffer);
ND4J_EXPORT _CUDA_HD void shapeOldScalar(Nd4jLong* const buffer, const char order);
//END HEADERS
//BEGIN IMPLEMENTATIONS
#ifdef __CUDACC__
template <typename T>
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) {
// if we go for 3 dimensions coord space or below - just use shared memory for that
if (size <= MAX_COORD * 4) {
Nd4jLong *ptr = new Nd4jLong[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
return ptr;
} else {
// otherwise go to preallocated global memory :(
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid * size > PREALLOC_SIZE - size) {
return (Nd4jLong *) malloc(size);
} else {
Nd4jLong *ret = buffer;
ret += (tid * size);
return ret;
}
}
}
#endif
#ifdef __CUDACC__
/**
* BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES
*/
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
Nd4jLong *ret = buffer;
ret += (threadIdx.x * size);
return ret;
}
#endif
/**
* Length of a tad given
* the shape information
*/
INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
if(dimensionLength == 1) {
return shape::shapeOf(shapeInfo)[dimension[0]];
}
else {
int ret = 1;
for(int i = 0; i < shape::rank(shapeInfo); i++) {
for(int j = 0; j < dimensionLength; j++) {
if(i == dimension[j])
ret *= shape::shapeOf(shapeInfo)[dimension[j]];
}
}
return ret;
}
}
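/**
 * Illustrative sketch (hypothetical helper, not part of the API): for a
 * rank-3 c-order array of shape {2,3,4}, the tad length along dimensions
 * {1,2} is 3 * 4 = 12.
 */
INLINEDEF _CUDA_HD int exampleTadLength() {
    Nd4jLong info[] = {3, 2,3,4, 12,4,1, 0,1,99}; // shape info for c-order {2,3,4}
    int dims[] = {1, 2};
    return shape::tadLength(info, dims, 2); // 12
}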
/**
* Tad element wise stride:
* given the inner most dimension (the last dimension after sorting),
* the element wise stride of the tad (disregarding order) is the
* last dimension's stride.
*
* For a given singular dimension this will just be the only entry.
* For example, given the following c order shape/stride:
* 2,2,3,2
* 12,6,2,1
*
* The tad element wise stride for 3 will be 1.
* For dimension zero it will be 12.
*
* For 2,3 it's 1
*
* Note here that the multi dimensional 2,3 case
* is equivalent to the singular 3 case.
*
*
* Note that this is for the dimension that ultimately
* ends up removed.
*
* Again: this may not preserve ordering of the tad
* but may be used for reductions.
*/
INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension,int dimensionLength) {
return reductionIndexElementWiseStride(shapeInfo,dimension,dimensionLength);
}
INLINEDEF _CUDA_HD bool shapeEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) {
if(shape1Rank != shape2Rank)
return false;
// ranks are not equal
for(int i = 0; i < shape1Rank; i++) {
if(shape1[i] != shape2[i])
return false;
}
return true;
}
INLINEDEF _CUDA_HD bool shapeEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) {
return shape::shapeEquals(shape::rank(shapeInfo1),shape::shapeOf(shapeInfo1),shape::rank(shapeInfo2),shape::shapeOf(shapeInfo2));
}
INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) {
if(shape1Rank != shape2Rank)
return false;
// ranks are not equal
for(int i = 0; i < shape1Rank; i++) {
if(shape1[i] != shape2[i])
return false;
}
return true;
}
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) {
return shape::strideEquals(shape::rank(shapeInfo1),shape::stride(shapeInfo1),shape::rank(shapeInfo2),shape::stride(shapeInfo2));
}
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1 , Nd4jLong *stride2, int rank2) {
if(rank1 != rank2)
return false;
for(int i = 0; i < rank1; i++) {
if(stride1[i] != stride2[i])
return false;
}
return true;
}
INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) {
Nd4jLong *retShape;
int retShapeLength;
if(dimensionLength == 1 && dimension[0] == 2147483647) {
retShape = new Nd4jLong[2];
retShape[0] = 1;
retShape[1] = 1;
retShapeLength = 2;
}
else {
retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength;
}
//ensure vector is proper shape
if (retShapeLength == 1) {
if (dimension[0] == 0) {
auto newRetShape = new Nd4jLong[2]{1, retShape[0]};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
else {
auto newRetShape = new Nd4jLong[2]{retShape[0], 1};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
} else if (retShapeLength == 0) {
auto newRetShape = new Nd4jLong[2]{1, 1};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
auto ret = shape::shapeBuffer(retShapeLength,retShape);
delete[] retShape;
return ret;
}
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) {
Nd4jLong *theShape = shape::shapeOf(shapeInfo);
Nd4jLong *theStride = shape::stride(shapeInfo);
int rank = dimensionLength == 1 ? 2 : dimensionLength;
Nd4jLong *ret = buffer;
//set the rank
ret[0] = rank;
Nd4jLong *retShape = shape::shapeOf(ret);
Nd4jLong *retStride = shape::stride(ret);
int len = rank;
if(dimensionLength == 1) {
if(shape::isMatrix(theShape,shape::rank(shapeInfo))) {
// both branches of the original dimension[0] == 0 check were identical,
// so the branch is folded away here without changing behavior
Nd4jLong newStride[2] = {theStride[dimension[0]],1};
Nd4jLong newShape[2] = {theShape[dimension[0]],1};
retShape[0] = newShape[0];
retShape[1] = newShape[1];
retStride[0] = newStride[0];
retStride[1] = newStride[1];
}
else {
Nd4jLong newStride[2] = {1,theStride[dimension[0]]};
Nd4jLong newShape[2] = {1,theShape[dimension[0]]};
retShape[0] = newShape[0];
retShape[1] = newShape[1];
retStride[0] = newStride[0];
retStride[1] = newStride[1];
}
}
else {
Nd4jLong *newIndexes = dimension;
if(reverseCopyStride)
shape::reverseCopyTo(theStride, retStride, newIndexes, len);
else
shape::copyTo(len, theStride, retStride, newIndexes);
shape::copyTo(len, theShape, retShape, newIndexes);
}
ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo);
return ret;
}
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) {
int rank = dimensionLength == 1 ? 2 : dimensionLength;
traceNew(4);
Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)];
return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret);
}
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) {
traceNew(5);
Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)];
return createShapeInfo(shape, stride, rank, ret);
}
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) {
buffer[0] = rank;
Nd4jLong *retShape = shape::shapeOf(buffer);
Nd4jLong *retStride = shape::stride(buffer);
for(int i = 0;i < rank; i++) {
retShape[i] = shape[i];
retStride[i] = stride[i];
}
return buffer;
}
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) {
if (isVector(shape, rank)) {
traceNew(5);
Nd4jLong *ret = new Nd4jLong[2];
for (int i = 0; i < 2; i++)
ret[i] = 1;
return ret;
}
int dimensions = rank;
traceNew(6);
Nd4jLong *stride = new Nd4jLong[dimensions];
int st = startNum;
for (int j = 0; j < rank; j++) {
stride[j] = st;
st *= shape[j];
}
return stride;
}
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) {
if (isVector(shape, rank)) {
for (int i = 0; i < 2; i++)
ret[i] = 1;
return ret;
}
int dimensions = rank;
int st = startNum;
for (int j = 0; j < rank; j++) {
ret[j] = st;
st *= shape[j];
}
return ret;
}
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) {
traceNew(7);
Nd4jLong *stride = new Nd4jLong[rank];
if (rank == 1) {
stride[0] = 1;
return stride;
}
if (shape::isVector(shape, rank)) {
for (int i = 0; i < 2; i++)
stride[i] = 1;
return stride;
}
int st = startNum;
for (int j = rank - 1; j >= 0; j--) {
stride[j] = st;
st *= shape[j];
}
return stride;
}
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) {
if (rank == 1) {
ret[0] = 1;
return ret;
}
if (shape::isVector(shape, rank)) {
for (int i = 0; i < 2; i++)
ret[i] = 1;
return ret;
}
int st = startNum;
for (int j = rank - 1; j >= 0; j--) {
ret[j] = st;
st *= shape[j];
}
return ret;
}
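/**
 * Illustrative sketch (hypothetical helper): c-order strides for shape
 * {2,3,4} with startNum 1 come out as {12,4,1}.
 */
INLINEDEF _CUDA_HD void exampleCalcStrides() {
    Nd4jLong shp[] = {2, 3, 4};
    Nd4jLong str[3];
    shape::calcStrides(shp, 3, 1, str); // str == {12,4,1}
}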
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) {
return calcStridesFortran(shape, rank, 1);
}
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) {
return calcStridesFortran(shape, rank, 1, ret);
}
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) {
return calcStrides(shape, rank, 1);
}
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) {
return calcStrides(shape, rank, 1, ret);
}
INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shape, const char order) {
int rank = shape[0];
int doubleRank = 2*rank;
if (rank > 0)
if(order == 'c') {
shape[doubleRank] = 1; // set unity as last stride for c order
for(int j=1; j<rank; ++j)
shape[doubleRank-j] = shape[doubleRank-j+1]*shape[rank+1-j];
}
else {
shape[rank+1] = 1; // set unity as first stride for f order
for(int j=rank+1; j<doubleRank; ++j)
shape[j+1] = shape[j]*shape[j-rank];
}
// set last 3 elements in shape
shape[doubleRank + 1] = 0;
shape[doubleRank + 2] = 1;
shape[doubleRank + 3] = (int)order;
}
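/**
 * Illustrative sketch (hypothetical helper): updateStrides fills the stride
 * section of an existing shape info buffer in place.
 */
INLINEDEF _CUDA_HD void exampleUpdateStrides() {
    Nd4jLong info[] = {2, 2,3, 0,0, 0,0,0}; // rank 2, shape {2,3}, strides unset
    shape::updateStrides(info, 'c'); // strides become {3,1}; trailing cells set to 0, 1, 'c'
}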
// check whether the input dimensions are permuted; they are unpermuted when they appear in order 0,...,rank-1
template <typename T>
INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) {
for(int i=0; i<dimSize-1; ++i)
if(dimensions[i] > dimensions[i+1])
return true;
return false;
}
/**
* @param toCopy the shape to copy
* @return a copy of the original struct
*/
INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) {
auto copy = new ShapeInformation;
traceNew(8);
copy->shape = new Nd4jLong[toCopy->rank];
memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong));
traceNew(9);
copy->stride = new Nd4jLong[toCopy->rank];
for (int i = 0; i < toCopy->rank; i++) {
copy->stride[i] = toCopy->stride[i];
}
copy->order = toCopy->order;
copy->rank = toCopy->rank;
copy->offset = toCopy->offset;
copy->elementWiseStride = toCopy->elementWiseStride;
return copy;
}
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) {
if (rank == 0)
return 1;
if(shape::isVector(shape,rank)) {
return stride[rank - 1];
}
else {
int oldnd;
Nd4jLong *olddims = shape::copyOf(rank, shape);
Nd4jLong *oldstrides = shape::copyOf(rank, stride);
int np, op, last_stride;
int oi, oj, ok, ni, nj, nk;
traceNew(10);
auto newStrides = new Nd4jLong[rank];
oldnd = 0;
//set the shape to be 1 x length
int newShapeRank = 2;
auto newShape = new Nd4jLong[newShapeRank];
newShape[0] = 1;
newShape[1] = shape::prodLong(shape, rank);
/*
* Remove axes with dimension 1 from the old array. They have no effect
* but would need special cases since their strides do not matter.
*/
for (oi = 0; oi < rank; oi++) {
if (shape[oi] != 1) {
olddims[oldnd] = shape[oi];
oldstrides[oldnd] = stride[oi];
oldnd++;
}
}
np = 1;
for (ni = 0; ni < newShapeRank; ni++) {
np *= newShape[ni];
}
op = 1;
for (oi = 0; oi < oldnd; oi++) {
op *= olddims[oi];
}
if (np != op) {
/* different total sizes; no hope */
delete[] newStrides;
delete[] newShape;
delete[] oldstrides;
delete[] olddims;
return -1;
}
if (np == 0) {
/* the current code does not handle 0-sized arrays, so give up */
delete[] newStrides;
delete[] newShape;
delete[] oldstrides;
delete[] olddims;
return -1;
}
/* oi to oj and ni to nj give the axis ranges currently worked with */
oi = 0;
oj = 1;
ni = 0;
nj = 1;
while (ni < newShapeRank && oi < oldnd) {
np = newShape[ni];
op = olddims[oi];
while (np != op) {
if (np < op) {
/* Misses trailing 1s, these are handled later */
np *= newShape[nj++];
} else {
op *= olddims[oj++];
}
}
/* Check whether the original axes can be combined */
for (ok = oi; ok < oj - 1; ok++) {
if (isFOrder) {
if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
/* not contiguous enough */
delete[] newStrides;
delete[] newShape;
delete[] oldstrides;
delete[] olddims;
return -1;
}
} else {
/* C order */
if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
/* not contiguous enough */
delete[] newStrides;
delete[] newShape;
delete[] oldstrides;
delete[] olddims;
return -1;
}
}
}
/* Calculate new strides for all axes currently worked with */
if (isFOrder) {
newStrides[ni] = oldstrides[oi];
for (nk = ni + 1; nk < nj; nk++) {
newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1];
}
} else {
/* C order */
newStrides[nj - 1] = oldstrides[oj - 1];
for (nk = nj - 1; nk > ni; nk--) {
newStrides[nk - 1] = newStrides[nk] * newShape[nk];
}
}
ni = nj++;
oi = oj++;
}
/*
* Set strides corresponding to trailing 1s of the new shape.
*/
if (ni >= 1) {
last_stride = newStrides[ni - 1];
} else {
last_stride = stride[rank - 1];
}
if (isFOrder) {
if (ni >= 1)
last_stride *= newShape[ni - 1];
}
for (nk = ni; nk < newShapeRank; nk++) {
newStrides[nk] = last_stride;
}
//returns the last element of the new stride array
int ret = last_stride;
delete[] newStrides;
delete[] newShape;
delete[] oldstrides;
delete[] olddims;
return ret;
}
}
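/**
 * Illustrative sketch (hypothetical helper): a fully contiguous c-order
 * array collapses to element wise stride 1; incompatible strides return -1.
 */
INLINEDEF _CUDA_HD int exampleElementWiseStride() {
    Nd4jLong shp[] = {2, 3, 4};
    Nd4jLong str[] = {12, 4, 1};
    return shape::computeElementWiseStride(3, shp, str, 0); // 1
}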
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder,
Nd4jLong *dimension, int dimensionLength) {
if(dimensionLength == 1) {
return stride[dimension[0]];
}
return -1;
}
/**
* Get the shape info buffer
* for the given rank and shape.
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape) {
Nd4jLong *stride = shape::calcStrides(shape, rank);
traceNew(11);
auto shapeInfo = new shape::ShapeInformation();
shapeInfo->shape = shape;
shapeInfo->stride = stride;
shapeInfo->offset = 0;
shapeInfo->rank = rank;
int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo->order = 'c';
shapeInfo->elementWiseStride = elementWiseStride;
auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo);
delete[] stride;
delete shapeInfo;
return shapeInfoBuffer;
}
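/**
 * Illustrative usage sketch (hypothetical; assumes the returned buffer is
 * heap allocated with new[], as the traceNew calls in this header suggest).
 */
INLINEDEF _CUDA_HD void exampleShapeBuffer() {
    Nd4jLong shp[] = {2, 3};
    auto info = shape::shapeBuffer(2, shp); // {2, 2,3, 3,1, 0, 1, 'c'}
    delete[] info;
}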
/**
* This is special method, it returns ONLY 2D shapebuffer.
*
* This method is used only for SoftMax
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *buffer) {
Nd4jLong stride[MAX_RANK];
shape::calcStrides(shape,rank, stride);
shape::ShapeInformation shapeInfo;
shapeInfo.shape = shape;
shapeInfo.stride = stride;
shapeInfo.offset = 0;
shapeInfo.rank = rank;
auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo.order = 'c';
shapeInfo.elementWiseStride = elementWiseStride;
shape::toShapeBuffer(&shapeInfo, buffer);
return buffer;
}
/**
* Get the shape info buffer
* for the given rank and shape.
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape) {
auto stride = shape::calcStridesFortran(shape,rank);
traceNew(12);
auto shapeInfo = new shape::ShapeInformation();
shapeInfo->shape = shape;
shapeInfo->stride = stride;
shapeInfo->offset = 0;
shapeInfo->rank = rank;
int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo->order = 'f';
shapeInfo->elementWiseStride = elementWiseStride;
auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo);
delete[] stride;
delete shapeInfo;
return shapeInfoBuffer;
}
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, Nd4jLong *shape, Nd4jLong *output) {
Nd4jLong stride[MAX_RANK];
shape::calcStridesFortran(shape,rank, stride);
shape::ShapeInformation shapeInfo;
shapeInfo.shape = shape;
shapeInfo.stride = stride;
shapeInfo.offset = 0;
shapeInfo.rank = rank;
auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo.order = 'f';
shapeInfo.elementWiseStride = elementWiseStride;
shape::toShapeBuffer(&shapeInfo, output);
return output;
}
/**
* Compute the real linear indices for the given shape and stride
*/
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) {
Nd4jLong length = shape::prodLong(shape,rank);
traceNew(13);
Nd4jLong *ret = new Nd4jLong[length];
for(int i = 0; i < length; i++) {
Nd4jLong *idx = shape::ind2sub(rank, shape, i);
ret[i] = shape::getOffset(0, shape, stride, idx, rank);
delete[] idx;
}
return ret;
}
/**
* Compute the real linear indices for the given shape and stride
*/
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) {
return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer));
}
/**
* Convert the given index (such as 1,1)
* to a linear index
* @param shape the shape of the indexes to convert
* @param indices the index to convert
* @return the linear index given the shape
* and indices
*/
INLINEDEF _CUDA_HD int sub2Ind(int rank, Nd4jLong *shape, Nd4jLong *indices) {
int index = 0;
int shift = 1;
for(int i = 0; i < rank; i++) {
index += shift * indices[i];
shift *= shape[i];
}
return index;
}
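    // Worked example (annotation added for clarity; the values are hypothetical):
    //   Nd4jLong shape[]   = {2, 3};
    //   Nd4jLong indices[] = {1, 2};
    //   shape::sub2Ind(2, shape, indices);   // == 1*1 + 2*2 == 5
    // the shift accumulates the product of the leading dimensions (column-major order).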
template <typename T>
INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {
#pragma omp simd
for (int e = 0; e < length; e++)
buffer[e] = value;
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, int index,int numIndices) {
traceNew(14);
auto ret = new Nd4jLong[rank];
ind2sub(rank, shape, index, numIndices, ret);
return ret;
}
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, int index) {
return ind2sub(rank,shape, index,shape::prodLong(shape,rank));
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, int index, int numIndices, Nd4jLong *ret) {
int denom = numIndices;
for(int i = rank - 1; i >= 0; i--) {
denom /= shape[i];
ret[i] = index / denom;
index %= denom;
}
}
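    // Worked example (annotation added for clarity; the values are hypothetical):
    // for shape {2, 3, 4} and numIndices 24, index 17 is peeled from the last axis backwards:
    //   Nd4jLong idx[3];
    //   shape::ind2sub(3, shape, 17, 24, idx);   // idx == {1, 2, 2}
    // sub2Ind inverts this mapping: 1*1 + 2*2 + 2*6 == 17.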
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape,int index, Nd4jLong *out) {
ind2sub(rank,shape, index,shape::prodLong(shape,rank),out);
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD Nd4jLong * ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
auto ret = new Nd4jLong[rank];
ind2subC(rank, shape, index, numIndices, ret);
return ret;
}
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD Nd4jLong *ind2subC(int rank, Nd4jLong *shape, Nd4jLong index) {
return ind2subC(rank,shape, index, shape::prodLong(shape,rank));
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) {
auto denom = numIndices;
for(int i = 0; i < rank; i++) {
denom /= shape[i];
if(denom > 0) {
ret[i] = index / denom;
index %= denom;
}
else
ret[i] = 0;
}
}
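    // Worked example (annotation added for clarity; the values are hypothetical):
    // the C-order variant peels dimensions from the front instead; for shape {2, 3, 4}:
    //   Nd4jLong idx[3];
    //   shape::ind2subC(3, shape, 17, 24, idx);   // idx == {1, 1, 1}, since 17 == 1*12 + 1*4 + 1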
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2subC(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
ind2subC(rank,shape, index,shape::prodLong(shape,rank),out);
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, int index, int numIndices,Nd4jLong *out) {
if(shape::order(shapeInfo) == 'f') {
shape::ind2sub(
shape::rank(shapeInfo),
shape::shapeOf(shapeInfo),
index,
numIndices,
out);
}
else {
shape::ind2subC(
shape::rank(shapeInfo),
shape::shapeOf(shapeInfo),
index,
numIndices,
out);
}
}
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
* @return the mapped indexes along each dimension
*/
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, int index, Nd4jLong *out) {
ind2subOrder(shapeInfo,index,shape::length(shapeInfo),out);
}
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) {
traceNew(16);
Nd4jLong *ret = new Nd4jLong[length];
for (int i = 0; i < length; i++) {
ret[i] = shape[rearrange[i]];
}
return ret;
}
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) {
if(length == 1) {
return;
}
else {
Nd4jLong *shapeDeref = *shape;
if(shape::prodLong(shapeDeref,length) < 2) {
return;
}
}
bool inOrder = true;
for(int i = 0; i < length - 1; i++) {
inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1];
}
//all in order, nothing to do
if(inOrder)
return;
Nd4jLong *shapeDeref = *shape;
//we know they are just reversed, dimension length of 2
if(length == 2) {
auto shapeFirst = shapeDeref[0];
auto shapeSecond = shapeDeref[1];
shapeDeref[0] = shapeSecond;
shapeDeref[1] = shapeFirst;
return;
}
else if(length == 1) {
//no permute
return;
}
auto temp = new Nd4jLong[length];
memcpy(temp,shapeDeref,sizeof(Nd4jLong) * length);
for (int i = 0; i < length; i++) {
shapeDeref[i] = temp[rearrange[i]];
}
delete[] temp;
}
INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) {
if(shapeBuffer != out)
memcpy(out,shapeBuffer,sizeof(Nd4jLong) * shape::shapeInfoLength(shape::rank(shapeBuffer)));
doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out);
}
INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) {
auto len = shape::shapeInfoLength(shape::rank(shapeBuffer));
Nd4jLong *copy = shape::copyOf(len, shapeBuffer);
doPermuteShapeBuffer(copy,rearrange);
return copy;
}
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) {
const int rank = shape::rank(shapeInfo);
//check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
return;
// check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
bool isPermutNecessary = false;
for(int i = 0; i < rank; ++i)
if(rearrange[i] != i) {
isPermutNecessary = true;
break;
}
if(!isPermutNecessary)
return;
// check whether rearrange contains correct indexes
for(int i = 0; i < rank; ++i)
if(rearrange[i] >= rank || rearrange[i] < 0) {
printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
return;
}
// if everything is ok then perform permute
auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
for (int i = 0; i < rank; ++i) {
shapeInfo[i + 1] = temp[rearrange[i] + 1];
shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];
}
shapeInfo[shapeInfoLength(rank) - 2] = -1;
shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1);
delete[] temp;
}
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) {
const int rank = shape::rank(shapeInfo);
//check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
return;
// check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
bool isPermutNecessary = false;
for(int i = 0; i < rank; ++i)
if(rearrange[i] != i) {
isPermutNecessary = true;
break;
}
if(!isPermutNecessary)
return;
// check whether rearrange contains correct indexes
for(int i = 0; i < rank; ++i)
if(rearrange[i] >= rank || rearrange[i] < 0) {
printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
return;
}
// if everything is ok then perform permute
auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
for (int i = 0; i < rank; ++i) {
shapeInfo[i + 1] = temp[rearrange[i] + 1];
shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];
}
shapeInfo[shapeInfoLength(rank) - 2] = -1;
shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1);
delete[] temp;
}
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) {
//no swapping needs to happen
if(shape::isScalar(shapeBuffer)) {
return;
}
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = shape::rank(shapeRef);
Nd4jLong *shape = shape::shapeOf(shapeRef);
Nd4jLong *stride = shape::stride(shapeRef);
shape::doPermuteSwap(rearrageRank,&shape,rearrange);
shape::doPermuteSwap(rearrageRank,&stride,rearrange);
shapeRef[shapeInfoLength(rearrageRank) - 2] = -1;
shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
// doPermuteShapeInfo(shapeBuffer, rearrange); // possible fix of integer overflow issue when strides are too large
}
/*
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
auto shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = shape::rank(shapeRef);
auto shape = shape::shapeOf(shapeRef);
auto stride = shape::stride(shapeRef);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer);
shapeRef[shapeInfoLength(rearrageRank) - 2] = -1;
shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
}
*/
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) {
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = rank;
Nd4jLong *shape = shape::shapeOf(shapeRef);
Nd4jLong *stride = shape::stride(shapeRef);
auto rearrangeCopy1 = shape::copyOf(rearrageRank, rearrange);
shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1);
delete[] rearrangeCopy1;
auto rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange);
shape::doPermuteSwap(rearrageRank, &stride, rearrangeCopy2);
shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
shapeBuffer[shape::shapeInfoLength(rank) - 2] = -1;
delete[] rearrangeCopy2;
}
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = rank;
auto shape = shape::shapeOf(shapeRef);
auto stride = shape::stride(shapeRef);
if(shapeBuffer != tmpBuffer)
shape::copyOf(rearrageRank,shapeBuffer, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&shape,rearrange);
shape::doPermuteSwap(rearrageRank,&stride,rearrange);
shapeRef[shapeInfoLength(rank) - 2] = -1;
shapeRef[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
}
INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) {
int delta = originalRank - dimensionLength;
traceNew(17);
Nd4jLong *ret = new Nd4jLong[originalRank];
for(int i = 0; i < delta; i++) {
ret[i] = i + dimensionLength;
}
for(int i = delta; i < originalRank; i++) {
ret[i] = i - delta;
}
return ret;
}
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) {
int sd = -1;
int dim = -1;
int i = -1;
int cContiguous = 1;
int isFortran = 1;
sd = 1;
for (i = length - 1; i >= 0; --i) {
dim = shape[i];
if (stride[i] != sd) {
cContiguous = 0;
break;
}
/* contiguous, if it got this far */
if (dim == 0) {
break;
}
sd *= dim;
}
/* check if fortran contiguous */
sd = elementStride;
for (i = 0; i < length; ++i) {
dim = shape[i];
if (stride[i] != sd) {
isFortran = 0;
}
if (dim == 0) {
break;
}
sd *= dim;
}
if (isFortran && cContiguous)
return 'a';
else if (isFortran && !cContiguous)
return 'f';
else if (!isFortran && !cContiguous)
return 'c';
else
return 'c';
}
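    // Illustrative cases (annotation added for clarity; the values are hypothetical),
    // assuming a 2x3 array and elementStride 1:
    //   Nd4jLong shape[]   = {2, 3};
    //   Nd4jLong cStride[] = {3, 1};   // getOrder(2, shape, cStride, 1) == 'c'
    //   Nd4jLong fStride[] = {1, 2};   // getOrder(2, shape, fStride, 1) == 'f'
    // 'a' is returned only when both contiguity tests pass, e.g. a contiguous rank-1 vector.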
/**
 * Ensure that every value in the rearrange
 * array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
template <typename T>
INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) {
if (arrLength != shapeLength)
return -1;
for (int i = 0; i < arrLength; i++) {
if (arr[i] >= arrLength || arr[i] < 0)
return -1;
}
for (int i = 0; i < arrLength; i++) {
for (int j = 0; j < arrLength; j++) {
if (i != j && arr[i] == arr[j])
return -1;
}
}
return 1;
}
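    // Examples (annotation added for clarity; the values are hypothetical):
    //   int ok[]  = {1, 0, 2};   // checkArrangeArray(ok, 3, 3)  ==  1 (valid permutation)
    //   int bad[] = {0, 0, 1};   // checkArrangeArray(bad, 3, 3) == -1 (duplicate entry)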
INLINEDEF _CUDA_HD void traceNew(int id) {
//printf("new happened: [%i]\n", id);
#ifndef __CUDACC__
//fflush(stdout);
#endif
}
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) {
ShapeInformation *infoDeref = *info;
checkArrangeArray(rearrange, rank, rank);
shape::doPermuteSwap(rank, &infoDeref->shape, rearrange);
shape::doPermuteSwap(rank, &infoDeref->stride, rearrange);
char order = getOrder(rank,
infoDeref->shape,
infoDeref->stride,
infoDeref->elementWiseStride);
infoDeref->order = order;
}
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) {
if (rank == 0)
return 0;
if (rank == 1)
return 1;
if (rank > 2)
return 0;
else if (rank <= 2) {
if (shape[0] == 1 || shape[1] == 1)
return 1;
}
return 0;
}
INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
int numOfNonUnity = 0;
for(int i = 1; i <= shapeInfo[0]; ++i) {
if(shapeInfo[i] != 1) {
++numOfNonUnity;
posOfNonUnityDim = i-1;
}
}
return numOfNonUnity == 1 && shapeInfo[0] > 2;
}
INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) {
Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)];
memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape));
return newShape;
}
INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) {
Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)];
memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape));
return newShape;
}
INLINEDEF _CUDA_HD int isVector(Nd4jLong *shapeInfo) {
return isVector(shape::shapeOf(shapeInfo),shape::rank(shapeInfo));
}
INLINEDEF _CUDA_HD bool isRowVector(Nd4jLong *shapeInfo) {
bool isVector = shape::isVector(shapeInfo) == 1;
bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1;
return isVector && shapeFirstOne;
}
INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) {
bool isVector = shape::isVector(shapeInfo) == 1;
bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1;
return isVector && !shapeFirstOne;
}
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) {
for(int i = 0; i < rank; i++) {
if(shape[i] == shape::prod(shape,rank))
return 1;
}
return 0;
}
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) {
return oneDimEqualToLength(shape::shapeOf(shapeInfo),shape::rank(shapeInfo));
}
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) {
if (rank > 2)
return 0;
else if (rank <= 2) {
if (shape[0] == 1 || shape[1] == 1)
return 0;
}
return 1;
}
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) {
return isMatrix(shape::shapeOf(shapeInfo),shape::rank(shapeInfo));
}
/**
* Returns the shape portion of an information
* buffer
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) {
return buffer + 1;
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) {
traceNew(18);
T *ret = new T[length];
return copyOf(length, toCopy, ret);
}
template <typename T>
INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) {
memcpy(ret, toCopy, sizeof(T)*length);
return ret;
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) {
memcpy(to, from, sizeof(T)*length);
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) {
for(int i = 0; i < length; i++) {
to[i] = from[indexes[i]];
}
}
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
/*
INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) {
Nd4jLong *strideCopy = copyOf(shapeRank, toPermute);
checkArrangeArray(rearrange, shapeRank, shapeRank);
Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
delete[] strideCopy;
return newStride;
}
*/
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) {
return shape + 1;
}
INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) {
return static_cast<int>(shape::shapeOf(shapeBuffer)[0]);
}
INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
int newRank = rank - 1;
if(newRank < 2)
newRank = 2;
Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)];
newShapeBuffer[0] = newRank;
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
Nd4jLong *currStride = shape::stride(shapeBuffer);
//initialize new shape and stride by taking the shape and stride + 1
//and adding to the shape information
//a slice is always just taking the existing shape and cutting the first index off
//of the shape and stride
Nd4jLong *newShape = shape::shapeOf(newShapeBuffer);
Nd4jLong *newStride = shape::stride(newShapeBuffer);
if(shape::isVector(shapeBuffer)) {
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
//row vector: slice index 0 is a valid index, just copy the whole thing
if(currShape[0] == 1) {
if(sliceIdx == 0) {
memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer)));
return newShapeBuffer;
}
}
//column vector: this will be a scalar
else {
delete[] newShapeBuffer;
Nd4jLong *scalar = shape::createScalarShapeInfo();
int offset = shape::offset(shapeBuffer);
scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx;
return scalar;
}
}
else if(shape::isMatrix(shapeBuffer)) {
newShape[0] = 1;
newShape[1] = currShape[1];
newStride[0] = 1;
newStride[1] = currStride[1];
}
else {
for(int i = 0; i < newRank; i++) {
newShape[i] = currShape[i + 1];
newStride[i] = currStride[i + 1];
}
}
auto indices = new Nd4jLong[rank];
memset((void *) indices,0,rank * sizeof(Nd4jLong));
indices[0] = sliceIdx;
Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank);
newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset;
if(shape::isMatrix(shapeBuffer)) {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1];
}
else {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer);
}
newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1);
delete[] indices;
return newShapeBuffer;
}
/**
* Returns the length of the
* shape information buffer:
 * rank * 2 + 4
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
INLINEDEF _CUDA_HD int shapeInfoLength(int rank) {
//FIXME magic numbers
return rank * 2 + 4;
}
INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) {
return shapeInfoLength(shape[0]);
}
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) {
//FIXME magic numbers
return (rank * 2 + 4) * sizeof(Nd4jLong);
}
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(Nd4jLong* shapeInfo) {
//FIXME magic numbers
return shapeInfoByteLength((int) shapeInfo[0]);
}
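    // Layout sketch (annotation added for clarity): for a C-ordered 3x4 array the
    // rank * 2 + 4 slots of the shape info buffer hold
    //   { 2,   3, 4,   4, 1,   0,      1,    99 }
    //     rank shape   stride  offset  ews   order (99 == 'c', 102 == 'f')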
/**
* Returns the rank portion of
* an information buffer
*/
INLINEDEF _CUDA_HD int rank( Nd4jLong *buffer) {
return static_cast<int>(buffer[0]);
}
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) {
traceNew(19);
auto info = new ShapeInformation;
auto length = shapeInfoLength(rank(buffer));
auto rank = buffer[0];
//start after rank
info->shape = buffer + 1;
info->stride = buffer + (1 + rank);
info->rank = rank;
info->offset = buffer[length - 3];
info->elementWiseStride = buffer[length - 2];
Nd4jLong *stride = buffer + 1 + rank;
info->stride = stride;
info->order = (char) buffer[length - 1];
return info;
}
/**
* Returns the stride portion of an information
* buffer
*/
INLINEDEF _CUDA_HD Nd4jLong *stride( Nd4jLong *buffer) {
return buffer + (1 + rank(buffer));
}
INLINEDEF _CUDA_HD bool isEmpty(Nd4jLong *shapeInfo) {
return ((shape::extra(shapeInfo) & ARRAY_EMPTY) == ARRAY_EMPTY);
}
/**
* Compute the length of the given shape
*/
INLINEDEF _CUDA_HD Nd4jLong length(Nd4jLong *shapeInfo) {
int rank = shape::rank(shapeInfo);
if (rank == 0) {
if (isEmpty(shapeInfo))
return 0L;
else
return 1L;
}
if (rank == 1)
return shapeInfo[1];
return shape::prodLong(shape::shapeOf(shapeInfo), rank);
}
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) {
Nd4jLong ret = 1;
for (auto v : shape) {
ret *= v;
}
return ret;
}
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) {
Nd4jLong ret = 1;
for (auto v : shape) {
ret *= v;
}
return ret;
}
/***
* Returns the offset
* portion of an information buffer
*/
INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) {
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) {
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
/**
* Returns the ordering
* for this shape information buffer
*/
INLINEDEF _CUDA_HD char order(Nd4jLong *buffer) {
//FIXME magic numbers
return static_cast<char>(buffer[(buffer[0] * 2 + 4) - 1]);
}
/**
* Returns the element wise stride for this information
* buffer
*/
INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(Nd4jLong *buffer) {
return buffer[shapeInfoLength(buffer[0]) - 2];
}
/**
* Returns the element wise stride for this information
* buffer relative to a dimension and reduction index
*/
INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) {
if(dimensionLength > 1) {
if(shape::order(buffer) == 'f') {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
//int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
//return tadElementWiseStride;
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
return 1;
}
else {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
return 1;
}
}
else {
if(shape::order(buffer) == 'f') {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
else {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
}
}
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) {
const int rank = shape::rank(info);
if(rank > 2)
return 0;
if(rank == 0)
return 1;
if(rank == 1)
return shape::shapeOf(info)[0] == 1;
if(rank == 2)
return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1;
return 0;
}
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) {
const int rank = info->rank;
if(rank > 2)
return 0;
if(rank == 1)
return info->shape[0] == 1;
if(rank == 2)
return info->shape[0] == 1 && info->shape[1] == 1;
return 0;
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the given items omitted
*/
template <typename T1, typename T2>
INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) {
int count = 0;
int absLength = dataLength - indexesLength;
for (int i = 0; i < dataLength && count < absLength; i++) {
int contains = 0;
for (int j = 0; j < indexesLength; j++) {
if (i == indexes[j]) {
contains = 1;
break;
}
}
if (!contains) {
ret[count] = data[i];
count++;
}
}
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
 * @param dataLength the length of the data array
 * @param indexesLength the length of the indexes array
 * @return the new array with the given items omitted
*/
template <typename T1, typename T2>
INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) {
auto lengthOfArr = dataLength - indexesLength;
if(lengthOfArr < 0) {
printf("Remove index call created a <= 0 length array. This was likely not intended.");
}
auto ret = new T1[lengthOfArr];
memset(ret,0,sizeof(T1) * lengthOfArr);
removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret);
return ret;
}
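    // Example (annotation added for clarity; the values are hypothetical):
    //   Nd4jLong data[]    = {10, 20, 30, 40};
    //   Nd4jLong indexes[] = {1};
    //   auto out = shape::removeIndex<Nd4jLong, Nd4jLong>(data, indexes, 4, 1);   // {10, 30, 40}
    //   delete[] out;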
INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) {
int len = end - indexesLength;
traceNew(20);
auto ret = new Nd4jLong[len];
int retIdx = 0;
        //note here that we use 0-based indexing for end - this assumes things like:
        //0 to 4 are specified
for(int i = begin; i < end ; i++) {
bool found = false;
for(int j = 0; j < indexesLength; j++) {
if(indexes[j] == i) {
found = true;
break;
}
}
if(!found) {
ret[retIdx++] = i;
}
}
return ret;
}
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
#ifdef __CUDACC__
INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) {
return offset + threadIdx.x * xInfo->elementWiseStride;
}
#endif
/**
 * Returns a shape array that
 * forces the given shape to be rank 2 (a row or column vector).
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) {
traceNew(21);
Nd4jLong *ret = new Nd4jLong[2];
if (dimension == 0) {
ret[0] = 1;
ret[1] = shape[0];
} else {
ret[0] = shape[0];
ret[1] = 1;
}
return ret;
}
/**
 * Returns a shape array that
 * forces the given shape to be rank 2 (a row or column vector).
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) {
return ensureVectorShape(shape, 0);
}
/**
* This method does STRICT comparison for two shape buffers
*
* @param shape
* @return
*/
INLINEDEF _CUDA_HD bool equalsStrict(Nd4jLong *shapeA, Nd4jLong *shapeB) {
if (shapeA[0] != shapeB[0])
return false;
if (shapeA[0] == 0)
return true;
// we do full comparison here
int length = shape::shapeInfoLength(shapeA[0]);
for (int e = 1; e < length; e++)
if (shapeA[e] != shapeB[e])
return false;
return true;
}
INLINEDEF _CUDA_HD int sizeAt(Nd4jLong *shape, int dim) {
if (dim >= 0)
return shape[1+dim];
else
return shape[1+(rank(shape) + dim)];
}
/**
* This method does SOFT comparison for two shape buffers, we compare only rank & shapes
*
* @param shape
* @return
*/
INLINEDEF _CUDA_HD bool equalsSoft(Nd4jLong *shapeA, Nd4jLong *shapeB) {
if (shapeA[0] != shapeB[0])
return false;
if (shapeA[0] == 0)
return true;
// we compare only shapes, and ignoring stride & ews
auto length = shapeA[0];
for (int e = 1; e <= length; e++)
if (shapeA[e] != shapeB[e])
return false;
return true;
}
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to, int increment) {
int diff = nd4j::math::nd4j_abs<int>(from - to);
int retLength = diff / increment;
T *ret;
traceNew(22);
if(diff / increment < 1)
ret = new T[1];
else
ret = new T[diff / increment];
if (from < to) {
int count = 0;
for (int i = from; i < to; i += increment) {
if (count >= retLength)
break;
ret[count++] = i;
}
} else if (from > to) {
int count = 0;
for (int i = from - 1; i >= to; i -= increment) {
if (count >= retLength)
break;
ret[count++] = i;
}
}
return ret;
}
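    // Examples (annotation added for clarity; the values are hypothetical):
    //   auto r = shape::range<int>(0, 6, 2);   // {0, 2, 4}: diff == 6, retLength == 3
    //   delete[] r;
    // the descending branch starts at from - 1, so range<int>(6, 0, 2) yields {5, 3, 1}.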
/**
* Generate a range
* beginning at from and ending at to
* incrementing by 1
* @param from the start
* @param to the end
* @return the int array starting at from and ending at to
*/
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to) {
return range<T>(from, to, 1);
}
/**
* Keep the given indexes in the data
* @param data
* @param index
* @param indexLength
* @param dataLength
* @return
*/
INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) {
traceNew(23);
Nd4jLong *ret = new Nd4jLong[indexLength];
int count = 0;
for (int i = 0; i < dataLength; i++) {
int contains = 0;
for (int j = 0; j < indexLength; j++) {
if (i == index[j]) {
contains = 1;
break;
}
}
if (contains)
ret[count++] = data[i];
}
return ret;
}
/**
* Generate a reverse
* copy of the data
*/
template <typename T>
INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) {
if (length < 1)
return nullptr;
traceNew(24);
T *copy = new T[length];
for (Nd4jLong i = 0; i <= length / 2; i++) {
T temp = data[i];
copy[i] = data[length - i - 1];
copy[length - i - 1] = temp;
}
return copy;
}
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) {
if (length < 1)
return;
for (Nd4jLong i = 0; i <= length / 2; i++) {
T temp = from[i];
to[i] = from[length - i - 1];
to[length - i - 1] = temp;
}
}
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) {
if (length < 1)
return;
for (Nd4jLong i = 0; i <= length / 2; i++) {
T temp = from[indexes[i]];
to[i] = from[indexes[length - i - 1]];
to[length - i - 1] = temp;
}
}
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
template <typename T>
INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) {
traceNew(25);
T *ret = new T[arr1Length + arr2Length];
std::memcpy(ret, arr1, arr1Length * sizeof(T));
std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(T));
return ret;
}
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
template <typename T>
INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) {
T* ret = new T[numTotalElements];
Nd4jLong count = 0;
for (Nd4jLong i = 0; i < numArrays; i++) {
for (Nd4jLong j = 0; j < lengths[i]; j++) {
ret[count++] = arr[i][j];
}
}
return ret;
}
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) {
if(shape::isVector(shape,rank)) {
//return total length for row vectors
if(dimensionLength == 1 && shape[0] == 1) {
return shape::prod(shape,rank);
}
}
else if(rank == dimensionLength)
return shape::prod(shape,rank);
        int absDelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength);
        traceNew(27);
        auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength);
        auto ret = prodLong(ret2, absDelta);
delete[] ret2;
return ret;
}
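    // Worked example (annotation added for clarity; the values are hypothetical):
    // for shape {2, 3, 4} reduced along dimension {1}, the remaining axes are {2, 4}, so
    //   lengthPerSlice(3, shape, dim, 1) == 2 * 4 == 8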
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) {
auto tensorLength = prodLong(tensorShape, tensorShapeLength);
auto lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength);
if (lengthPerSlice2 <= 0) {
return 0;
}
Nd4jLong offset = index * tensorLength / lengthPerSlice2;
return offset;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) {
Nd4jLong offset = index * tensorLength / lengthPerSlice2;
return offset;
}
#ifdef __CUDACC__
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) {
return offset + threadIdx.x * elementWiseStride(xInfo);
}
#endif
/**
* Computes the number
* of tensors along
* a given dimension
*/
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length,
volatile Nd4jLong *shape, int *dimension, int dimensionLength) {
Nd4jLong *tensorShape = shape::keep(shape, dimension, dimensionLength, rank);
Nd4jLong ret = length / shape::prodLong(tensorShape, dimensionLength);
delete[] tensorShape;
return ret;
}
/**
* Computes the number
* of tensors along
* a given dimension
*/
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
Nd4jLong *keepShape = shape::shapeOf(shapeInfo);
Nd4jLong *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo));
Nd4jLong ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength);
delete[] tensorShape;
return ret;
}
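    // Worked example (annotation added for clarity; the values are hypothetical):
    // a {2, 3, 4} array split along dimension {1} holds 24 / 3 == 8 length-3 tensors:
    //   shape::tensorsAlongDimension(shapeInfo, dim, 1) == 8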
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *indices, int rank) {
Nd4jLong offset = baseOffset;
for(int i = 0; i < rank; i++) {
if(indices[i] >= shape[i] && shape[i] != 1) {
#ifdef __CUDA_ARCH__
printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]);
#else
printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]);
#endif
#ifdef __CUDA_ARCH__
if (threadIdx.x == 0 && blockIdx.x == 0)
printShapeInfoLinear("getOffsetFailed", rank, shape, stride);
#endif
return -1;
}
if(shape[i] != 1) {
offset += indices[i] * stride[i];
}
}
return offset;
}
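    // Worked example (annotation added for clarity; the values are hypothetical):
    // for shape {2, 3}, stride {3, 1} and indices {1, 2},
    //   shape::getOffset(0, shape, stride, indices, 2) == 1*3 + 2*1 == 5
    // axes of extent 1 are skipped, so their index never contributes to the offset.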
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) {
return blockIdx + i * blockSize;
}
/**
* Computes the number of tads per block
*
*/
INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) {
return (int) nd4j::math::nd4j_ceil<double>(tads / (double) blockSize);
}
/**
* Returns a shape buffer
* for the shape information metadata.
*/
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) {
traceNew(29);
auto ret = new Nd4jLong[shapeInfoLength(info->rank)];
int count = 1;
int rank = info->rank;
ret[0] = info->rank;
for (int i = 0; i < rank; i++) {
ret[count++] = info->shape[i];
}
for (int i = 0; i < rank; i++) {
ret[count++] = info->stride[i];
}
ret[count++] = info->offset;
ret[count++] = info->elementWiseStride;
ret[count] = info->order;
return ret;
}
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) {
int count = 1;
int rank = info->rank;
ret[0] = info->rank;
if (ret[0] == 0) {
ret[1] = 0;
ret[2] = 1;
ret[3] = 99;
return ret;
}
for (int i = 0; i < rank; i++) {
ret[count++] = info->shape[i];
}
for (int i = 0; i < rank; i++) {
ret[count++] = info->stride[i];
}
ret[count++] = info->offset;
ret[count++] = info->elementWiseStride;
ret[count++] = info->order;
return ret;
}
INLINEDEF _CUDA_HD void printIntArray(Nd4jLong *arr,int length) {
for(int i = 0; i < length; i++) {
printf(" %lld ", (long long) arr[i]);
}
printf("\n");
}
INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) {
int rank = shape::rank(shapeInfo);
Nd4jLong *shape = shape::shapeOf(shapeInfo);
printf("Rank %d\n",rank);
printf("Shape:\n");
for(int i = 0; i < rank; i++) {
printf(" %lld ",(long long) shape[i]);
}
printf("\n");
Nd4jLong *stride = shape::stride(shapeInfo);
printf("Stride:\n");
for(int i = 0; i < rank; i++) {
printf(" %lld ", (long long) stride[i]);
}
printf("\n");
printf("Order %c\n",shape::order(shapeInfo));
}
INLINEDEF _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo) {
int rank = shape::rank(shapeInfo);
int lim = shape::shapeInfoLength(rank);
printf("ShapeInfo: [");
for (int i = 0; i < lim; i++) {
printf("%lld", (long long) shapeInfo[i]);
if (i < lim - 1) {
printf(", ");
}
}
printf("]\n");
#ifndef __CUDA_ARCH__
fflush(stdout);
#endif
}
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides) {
printf("%s : [", msg);
for (int i = 0; i < rank; i++) {
printf("%lld, ", (long long) shape[i]);
}
for (int i = 0; i < rank; i++) {
printf("%lld", (long long) strides[i]);
if (i < rank - 1)
printf(", ");
}
printf("]\n");
#ifndef __CUDA_ARCH__
fflush(stdout);
#endif
}
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo) {
int rank = shape::rank(shapeInfo);
int lim = shape::shapeInfoLength(rank);
printf("%s : [", msg);
for (int i = 0; i < lim; i++) {
printf("%lld", (long long) shapeInfo[i]);
if (i < lim - 1) {
printf(", ");
}
}
printf("]\n");
#ifndef __CUDACC__
fflush(stdout);
#endif
}
INLINEDEF _CUDA_HD void printArray(float *arr,int length) {
printf("Array: [");
for (int i = 0; i < length; i ++) {
printf("%f", arr[i]);
if (i + 1 < length) printf(", ");
}
printf("]\n");
}
/**
 * Given a linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
 * @param elementWiseStride the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) {
return i / (numElementsPerTad * elementWiseStride);
}
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
 * @param tadsForOriginal the number of tads for the original problem (eg: 3)
*/
INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
int tadsForOriginal) {
if (tadIndexForOriginal == 0)
return 0;
return tadIndexForOriginal / (tadsForOriginal / tadsForReduced);
}
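    // Worked example (annotation added for clarity; the values are hypothetical):
    // with 6 original tads collapsing into 2 reduced tads, each reduction index covers 3 tads:
    //   shape::reductionIndexForTad(5, 2, 6) == 5 / (6 / 2) == 1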
INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
Nd4jLong *shape = shape::shapeOf(shapeBuffer);
Nd4jLong *strides = shape::stride(shapeBuffer);
// swap shape
for (int e = 0; e < rank / 2; e++) {
int idx1 = rank - e - 1;
int idx2 = e;
            Nd4jLong tmp = shape[idx2];
shape[idx2] = shape[idx1];
shape[idx1] = tmp;
}
// swap strides
for (int e = 0; e < rank / 2; e++) {
int idx1 = rank - e - 1;
int idx2 = e;
            Nd4jLong tmp = strides[idx2];
strides[idx2] = strides[idx1];
strides[idx1] = tmp;
}
if (shape::order(shapeBuffer) == 'c')
shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102;
else
shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99;
}
/**
* Tad index for linear
* @param linearIndex
* @param tadLength
* @return
*/
INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) {
return linearIndex % tadLength;
}
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) {
return tadsForOriginal / tadsForReduce;
}
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
int tadNum, int originalTadNum) {
int tad = tadIndex(i, elementWiseStride, numElementsPerTad);
return reductionIndexForTad(tad, tadNum, originalTadNum);
}
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() {
traceNew(30);
auto shape = new Nd4jLong[1];
shape[0] = 1;
auto stride = new Nd4jLong[1];
stride[0] = 1;
auto shapeInformation2 = new ShapeInformation();
shapeInformation2->rank = 1;
shapeInformation2->offset = 0;
shapeInformation2->stride = stride;
shapeInformation2->shape = shape;
shapeInformation2->elementWiseStride = 1;
shapeInformation2->order = 99;
Nd4jLong *ret = shape::toShapeBuffer(shapeInformation2);
delete shapeInformation2;
delete[] shape;
delete[] stride;
return ret;
}
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) {
ret[0] = 2;
ret[1] = 1;
ret[2] = 1;
ret[3] = 1;
ret[4] = 1;
ret[5] = 0;
ret[6] = 1;
ret[7] = 99;
return ret;
}
/**
* Returns the prod of the data
* up to the given length
*/
INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) {
int prod = 1;
for (int i = 0; i < length; i++) {
prod *= data[i];
}
return prod;
}
/**
* Returns the prod of the data
* up to the given length
*/
INLINEDEF _CUDA_HD Nd4jLong prodLong( Nd4jLong *data, int length) {
Nd4jLong prod = 1;
for (int i = 0; i < length; i++) {
prod *= data[i];
}
return prod;
}
INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension,int dimensionLength) {
Nd4jLong *stride = shape::stride(data);
        //corner case: return the final item when it's greater than the max, since it's guaranteed to be left over
//note here that strides are interpreted in reverse for tad
//start from the front rather than the back
int rank = shape::rank(data);
if(shape::order(data) == 'f') {
int dimIdx = dimensionLength - 1;
for(int i = rank - 1; i >= 0; i--) {
/**
* Needs to find an algorithm such that:
* looping backwards will find the highest dimension left
* that isn't included in the dimension index list.
*
* This can also be thought of as the last item of the first index
* of the difference between the full list of indices and
* the dimension indices.
*
* We should avoid excessive object creation by only looping backwards.
*/
if(dimension[dimIdx--] != i) {
int ret = stride[i];
return ret;
}
}
}
else {
int dimIdx = dimensionLength - 1;
for(int i = rank - 1; i >= 0; i--) {
/**
* Needs to find an algorithm such that:
* looping backwards will find the highest dimension left
* that isn't included in the dimension index list.
*
* This can also be thought of as the last item of the first index
* of the difference between the full list of indices and
* the dimension indices.
*
* We should avoid excessive object creation by only looping backwards.
*/
if(dimension[dimIdx--] != i) {
int ret = stride[i];
return ret;
}
}
}
int ret = stride[0];
return ret;
}
#ifdef __CUDACC__
__device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) {
// we read first element, to find out length of our shapeInfoBuffer
int rank = shapeInfoBuffer[0];
int len = shape::shapeInfoLength(rank);
for (int i = threadIdx.x; i < len; i += blockDim.x)
targetBuffer[i] = shapeInfoBuffer[i];
}
#endif
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) {
return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int*) arr.shape.data(),arr.fortranOrder);
}
// INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) {
// unsigned Nd4jLong *shape;
// unsigned int ndims, wordSize;
// bool fortranOrder;
// cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder);
// Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder);
// delete[] shape;
// return ret;
// }
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape,bool fortranOrder) {
        if(fortranOrder) {
            // widen the unsigned int dimensions into a proper Nd4jLong buffer first;
            // reinterpreting the 32-bit array as Nd4jLong* would misread the shape
            Nd4jLong *newShape = new Nd4jLong[rank];
            for(int i = 0; i < rank; i++) {
                newShape[i] = shape[i];
            }
            Nd4jLong *shapeBufferRet = shape::shapeBufferFortran(rank,newShape);
            delete[] newShape;
            return shapeBufferRet;
        }
else {
Nd4jLong *newShape = new Nd4jLong[rank];
for(int i = 0; i < rank; i++) {
newShape[i] = shape[i];
}
Nd4jLong *shapeBufferRet = shape::shapeBuffer(rank,newShape);
delete[] newShape;
return shapeBufferRet;
}
}
INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
Nd4jLong *strides = shape::stride(shapeBuffer);
char order = shape::order(shapeBuffer);
if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1)
return true;
if (order == 'c') {
for (int i = 1; i < rank; i++)
if (strides[i-1] <= strides[i])
return false;
return true;
} else if (order == 'f') {
for (int i = 1; i < rank; i++)
if (strides[i-1] >= strides[i])
return false;
return true;
} else {
printf("Unknown order for array!\n");
return false;
}
}
INLINEDEF _CUDA_H bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) {
int oldnd;
Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
int np, op, last_stride;
int oi, oj, ok, ni, nj, nk;
Nd4jLong* newStrides = new Nd4jLong[newRank];
oldnd = 0;
/*
* Remove axes with dimension 1 from the old array. They have no effect
* but would need special cases since their strides do not matter.
*/
for (oi = 0; oi < oldRank; oi++) {
if (shape::shapeOf(oldShape)[oi] != 1) {
olddims[oldnd] = shape::shapeOf(oldShape)[oi];
oldstrides[oldnd] = shape::stride(oldShape)[oi];
oldnd++;
}
}
np = 1;
for (ni = 0; ni < newRank; ni++) {
np *= newShapeOf[ni];
}
op = 1;
for (oi = 0; oi < oldnd; oi++) {
op *= olddims[oi];
}
if (np != op) {
/* different total sizes; no hope */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
if (np == 0) {
/* the current code does not handle 0-sized arrays, so give up */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
/* oi to oj and ni to nj give the axis ranges currently worked with */
oi = 0;
oj = 1;
ni = 0;
nj = 1;
while (ni < newRank && oi < oldnd) {
np = newShapeOf[ni];
op = olddims[oi];
while (np != op) {
if (np < op) {
/* Misses trailing 1s, these are handled later */
np *= newShapeOf[nj++];
} else {
op *= olddims[oj++];
}
}
/* Check whether the original axes can be combined */
for (ok = oi; ok < oj - 1; ok++) {
if (isFOrder) {
if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
/* not contiguous enough */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
} else {
/* C order */
if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
/* not contiguous enough */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
}
}
/* Calculate new strides for all axes currently worked with */
if (isFOrder) {
newStrides[ni] = oldstrides[oi];
for (nk = ni + 1; nk < nj; nk++) {
newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
}
} else {
/* C order */
newStrides[nj - 1] = oldstrides[oj - 1];
for (nk = nj - 1; nk > ni; nk--) {
newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
}
}
ni = nj++;
oi = oj++;
}
if (ni >= 1) {
last_stride = newStrides[ni - 1];
} else {
last_stride = shape::elementWiseStride(oldShape);
}
if (isFOrder && ni >= 1) {
last_stride *= newShapeOf[ni - 1];
}
for (nk = ni; nk < newRank; nk++) {
newStrides[nk] = last_stride;
}
target[0] = newRank;
int cnt = 1;
for (int e = 0; e < newRank; e++)
target[cnt++] = newShapeOf[e];
for (int e = 0; e < newRank; e++)
target[cnt++] = newStrides[e];
target[shape::shapeInfoLength(newRank) - 3] = 0;
target[shape::shapeInfoLength(newRank) - 2] = -1;
target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99;
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return true;
}
INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) {
int oldnd;
Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
int np, op, last_stride;
int oi, oj, ok, ni, nj, nk;
auto newStrides = new Nd4jLong[newRank];
oldnd = 0;
/*
* Remove axes with dimension 1 from the old array. They have no effect
* but would need special cases since their strides do not matter.
*/
for (oi = 0; oi < oldRank; oi++) {
if (shape::shapeOf(oldShape)[oi] != 1) {
olddims[oldnd] = shape::shapeOf(oldShape)[oi];
oldstrides[oldnd] = shape::stride(oldShape)[oi];
oldnd++;
}
}
np = 1;
for (ni = 0; ni < newRank; ni++) {
np *= newShapeOf[ni];
}
op = 1;
for (oi = 0; oi < oldnd; oi++) {
op *= olddims[oi];
}
if (np != op) {
/* different total sizes; no hope */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
if (np == 0) {
/* the current code does not handle 0-sized arrays, so give up */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
/* oi to oj and ni to nj give the axis ranges currently worked with */
oi = 0;
oj = 1;
ni = 0;
nj = 1;
while (ni < newRank && oi < oldnd) {
np = newShapeOf[ni];
op = olddims[oi];
while (np != op) {
if (np < op) {
/* Misses trailing 1s, these are handled later */
np *= newShapeOf[nj++];
} else {
op *= olddims[oj++];
}
}
/* Check whether the original axes can be combined */
for (ok = oi; ok < oj - 1; ok++) {
if (isFOrder) {
if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
/* not contiguous enough */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
} else {
/* C order */
if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
/* not contiguous enough */
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return false;
}
}
}
/* Calculate new strides for all axes currently worked with */
if (isFOrder) {
newStrides[ni] = oldstrides[oi];
for (nk = ni + 1; nk < nj; nk++) {
newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
}
} else {
/* C order */
newStrides[nj - 1] = oldstrides[oj - 1];
for (nk = nj - 1; nk > ni; nk--) {
newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
}
}
ni = nj++;
oi = oj++;
}
delete[] olddims;
delete[] oldstrides;
delete[] newStrides;
return true;
}
    // this function checks the consistency of the dimensions with the array rank (negative dimensions, too-large dimensions, too many dimensions)
    // it also sorts the input array of dimensions, which is likewise necessary for creating a TAD object
INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) {
int dimSize = dimensions.size();
if(dimSize == 0)
throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!");
// check presence of negative dimensions and if they are present transform them to positive ones -dim -> rank - |dim|
for(auto& dim : dimensions)
if(dim < 0)
dim += rank;
// sort input array of dimensions, this operation is also necessary for creating TAD object in external methods
if (dimSize > 1) {
std::sort(dimensions.begin(), dimensions.end());
// remove duplicates if they are present
dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end());
}
        // check whether the number of dimensions is too big (> rank)
dimSize = dimensions.size();
if(dimSize > rank)
throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!");
        // check whether the min dimension is still negative and whether the max dimension is bigger than rank-1
        if(dimensions[0] < 0 || dimensions.back() > (rank-1))
            throw std::runtime_error("shape::checkDimensions method: a negative dimension is still present in the input array after the transform, or a dimension bigger than rank-1 is present!");
}
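// Usage sketch (hypothetical values): for an array of rank 4 an input of {-1, 2} is
// normalized by the function above to {2, 3}: the negative axis -1 becomes rank + (-1) = 3,
// then the vector is sorted and duplicates are removed.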
// return absolute index of array min, min is sub-array of max, index to be returned is min's index and corresponds to maxIdx of max array
INLINEDEF _CUDA_H Nd4jLong subArrayIndex(const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int maxIdx) {
const int rankMax = maxShapeInfo[0];
const int rankMin = minShapeInfo[0];
auto* idxPerRank = new Nd4jLong[rankMax];
ind2subC(rankMax, const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<int&>(maxIdx), idxPerRank);
Nd4jLong minIdx = 0;
for(int i = 0; i < rankMin; ++i) {
if(minShapeInfo[rankMin - i] == 1 || idxPerRank[rankMax - i - 1] == 0)
continue;
if(idxPerRank[rankMax - i - 1] >= minShapeInfo[rankMin - i])
idxPerRank[rankMax - i - 1] %= minShapeInfo[rankMin - i];
minIdx += idxPerRank[rankMax - i - 1] * stride(const_cast<Nd4jLong*>(minShapeInfo))[rankMin - i - 1];
}
delete[] idxPerRank;
return minIdx;
}
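// Index-mapping example (hypothetical shapes): let max have shape {2, 3, 4} and min the
// broadcastable shape {1, 4}. For maxIdx = 17 the C-order coordinates are (1, 1, 1); the
// size-1 axis of min is skipped, so only the last coordinate contributes and
// minIdx = 1 * stride(min)[1].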
INLINEDEF _CUDA_HD void shapeScalar(Nd4jLong* const buffer) {
buffer[0] = 0;
buffer[1] = 0;
buffer[2] = 1;
buffer[3] = 99;
}
INLINEDEF _CUDA_HD void shapeOldScalar(Nd4jLong* const buffer, const char order) {
buffer[0] = 2;
buffer[1] = 1;
buffer[2] = 1;
buffer[3] = 1;
buffer[4] = 1;
buffer[5] = 0;
buffer[6] = 1;
buffer[7] = (int)order;
}
INLINEDEF _CUDA_HD void shapeVector(const Nd4jLong length, Nd4jLong* const buffer) {
buffer[0] = 1;
buffer[1] = length;
buffer[2] = 1;
buffer[3] = 0;
buffer[4] = 1;
buffer[5] = 99;
}
template <typename T1, typename T2>
INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) {
for (Nd4jLong e = 0; e < length; e++)
to[e] = (T2) from[e];
};
}
#endif /* SHAPE_H_ */
|
convolution_3x3_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_kernel_packn_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = pb-pa-inch/pa-64-outch/pb
kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)4u * packn * packn, packn * packn);
for (int q = 0; q + (packn - 1) < outch; q += packn)
{
Mat g0 = kernel_tm_packn.channel(q / packn);
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + (packn - 1) < inch; p += packn)
{
for (int i = 0; i < packn; i++)
{
for (int j = 0; j < packn; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = (float)k00[k];
g00++;
}
}
}
}
}
}
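// Note (illustrative, not from the original source): the 8x3 ktm table above is the Winograd
// F(6x6,3x3) kernel-transform matrix G, and the two loop nests compute U = G * g * G^T for
// every 3x3 kernel g, producing the 8x8 tile that is stored into kernel_tm before interleaving.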
static void conv3x3s1_winograd63_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd63_transform_input_packn_rvv(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
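// Row-layout sketch (hypothetical tile count): with tiles = 11 each tm2 channel has
// tiles/8 + (tiles%8)/4 + (tiles%4)/2 + tiles%2 = 1 + 0 + 1 + 1 = 3 rows, holding one
// 8-wide block (i = 0..7), no 4-wide block, one 2-wide block (i = 8..9) and one single tile (i = 10).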
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row<float>(i / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr[2] = r0[l + packn * 2];
tmpptr[3] = r0[l + packn * 3];
tmpptr[4] = r0[l + packn * 4];
tmpptr[5] = r0[l + packn * 5];
tmpptr[6] = r0[l + packn * 6];
tmpptr[7] = r0[l + packn * 7];
tmpptr += 8;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _val4 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _val5 = vle32_v_f32m1(r0 + packn * 5, vl);
vfloat32m1_t _val6 = vle32_v_f32m1(r0 + packn * 6, vl);
vfloat32m1_t _val7 = vle32_v_f32m1(r0 + packn * 7, vl);
vsseg8e32_v_f32m1x8(tmpptr, vcreate_f32m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 8;
#endif
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr[2] = r0[l + packn * 2];
tmpptr[3] = r0[l + packn * 3];
tmpptr += 4;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(r0 + packn * 3, vl);
vsseg4e32_v_f32m1x4(tmpptr, vcreate_f32m1x4(_val0, _val1, _val2, _val3), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 4;
#endif
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr += 2;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vsseg2e32_v_f32m1x2(tmpptr, vcreate_f32m1x2(_val0, _val1), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 2;
#endif
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
vfloat32m1_t _val = vle32_v_f32m1(r0, vl);
vse32_v_f32m1(tmpptr, _val, vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row<const float>(i / 8);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum4 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum5 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum6 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum7 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
float val2 = *r0++;
float val3 = *r0++;
float val4 = *r0++;
float val5 = *r0++;
float val6 = *r0++;
float val7 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, val4, _w0, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, val5, _w0, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, val6, _w0, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, val7, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
vse32_v_f32m1(output0_tm + packn * 2, _sum2, vl);
vse32_v_f32m1(output0_tm + packn * 3, _sum3, vl);
vse32_v_f32m1(output0_tm + packn * 4, _sum4, vl);
vse32_v_f32m1(output0_tm + packn * 5, _sum5, vl);
vse32_v_f32m1(output0_tm + packn * 6, _sum6, vl);
vse32_v_f32m1(output0_tm + packn * 7, _sum7, vl);
output0_tm += packn * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
float val2 = *r0++;
float val3 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
vse32_v_f32m1(output0_tm + packn * 2, _sum2, vl);
vse32_v_f32m1(output0_tm + packn * 3, _sum3, vl);
output0_tm += packn * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
output0_tm += packn * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum, vl);
output0_tm += packn;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd63_transform_output_packn_rvv(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_packn_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = pb-pa-inch/pa-36-outch/pb
kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)4u * packn * packn, packn * packn);
for (int q = 0; q + (packn - 1) < outch; q += packn)
{
Mat g0 = kernel_tm_packn.channel(q / packn);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + (packn - 1) < inch; p += packn)
{
for (int i = 0; i < packn; i++)
{
for (int j = 0; j < packn; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = (float)k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd43_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 4;
int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd43_transform_input_packn_rvv(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row<float>(i / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr[2] = r0[l + packn * 2];
tmpptr[3] = r0[l + packn * 3];
tmpptr[4] = r0[l + packn * 4];
tmpptr[5] = r0[l + packn * 5];
tmpptr[6] = r0[l + packn * 6];
tmpptr[7] = r0[l + packn * 7];
tmpptr += 8;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _val4 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _val5 = vle32_v_f32m1(r0 + packn * 5, vl);
vfloat32m1_t _val6 = vle32_v_f32m1(r0 + packn * 6, vl);
vfloat32m1_t _val7 = vle32_v_f32m1(r0 + packn * 7, vl);
vsseg8e32_v_f32m1x8(tmpptr, vcreate_f32m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 8;
#endif
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr[2] = r0[l + packn * 2];
tmpptr[3] = r0[l + packn * 3];
tmpptr += 4;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(r0 + packn * 3, vl);
vsseg4e32_v_f32m1x4(tmpptr, vcreate_f32m1x4(_val0, _val1, _val2, _val3), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 4;
#endif
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = r0[l];
tmpptr[1] = r0[l + packn];
tmpptr += 2;
}
r0 += bottom_blob_tm.cstep * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(r0 + packn, vl);
vsseg2e32_v_f32m1x2(tmpptr, vcreate_f32m1x2(_val0, _val1), vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn * 2;
#endif
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row<float>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * packn;
for (int q = 0; q < inch; q++)
{
vfloat32m1_t _val = vle32_v_f32m1(r0, vl);
vse32_v_f32m1(tmpptr, _val, vl);
r0 += bottom_blob_tm.cstep * packn;
tmpptr += packn;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row<const float>(i / 8);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum4 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum5 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum6 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum7 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
float val2 = *r0++;
float val3 = *r0++;
float val4 = *r0++;
float val5 = *r0++;
float val6 = *r0++;
float val7 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, val4, _w0, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, val5, _w0, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, val6, _w0, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, val7, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
vse32_v_f32m1(output0_tm + packn * 2, _sum2, vl);
vse32_v_f32m1(output0_tm + packn * 3, _sum3, vl);
vse32_v_f32m1(output0_tm + packn * 4, _sum4, vl);
vse32_v_f32m1(output0_tm + packn * 5, _sum5, vl);
vse32_v_f32m1(output0_tm + packn * 6, _sum6, vl);
vse32_v_f32m1(output0_tm + packn * 7, _sum7, vl);
output0_tm += packn * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
float val2 = *r0++;
float val3 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
vse32_v_f32m1(output0_tm + packn * 2, _sum2, vl);
vse32_v_f32m1(output0_tm + packn * 3, _sum3, vl);
output0_tm += packn * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *r0++;
float val1 = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum0, vl);
vse32_v_f32m1(output0_tm + packn, _sum1, vl);
output0_tm += packn * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * packn; // inch always > 0
vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
float val = *r0++;
vfloat32m1_t _w0 = vle32_v_f32m1(k0, vl);
_sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
k0 += packn;
}
vse32_v_f32m1(output0_tm, _sum, vl);
output0_tm += packn;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd43_transform_output_packn_rvv(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
variational_distance_calculation_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Ruben Zorrilla
//
//
#if !defined(KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED )
#define KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "containers/model.h"
#include "includes/kratos_flags.h"
#include "elements/distance_calculation_element_simplex.h"
#include "linear_solvers/linear_solver.h"
#include "processes/process.h"
#include "modeler/connectivity_preserve_modeler.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "utilities/variable_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/**Takes a model part full of SIMPLICIAL ELEMENTS (triangles and tetras) and recomputes a signed distance function,
maintaining as much as possible the position of the zero of the function prior to the call.
This is achieved by minimizing the function ( 1 - norm( gradient( distance ) ) )**2
with the restriction that "distance" is a finite element function.
*/
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
class VariationalDistanceCalculationProcess : public Process
{
public:
KRATOS_DEFINE_LOCAL_FLAG(PERFORM_STEP1);
KRATOS_DEFINE_LOCAL_FLAG(DO_EXPENSIVE_CHECKS);
KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE);
///@name Type Definitions
///@{
typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
typedef typename SchemeType::Pointer SchemePointerType;
typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverPointerType;
typedef ImplicitSolvingStrategy< TSparseSpace, TDenseSpace, TLinearSolver > SolvingStrategyType;
///@}
///@name Pointer Definitions
/// Pointer definition of VariationalDistanceCalculationProcess
KRATOS_CLASS_POINTER_DEFINITION(VariationalDistanceCalculationProcess);
///@}
///@name Life Cycle
///@{
/**This process recomputes the distance function maintaining the zero of the existing distance distribution.
* For this reason the DISTANCE should be initialized to values distinct from zero in at least some portions of the domain.
* Alternatively, the DISTANCE shall be fixed to zero at least on some nodes, and the process will compute a positive distance
* respecting that zero.
* @param rBaseModelPart - the model part on top of which the calculation will be performed
* @param pLinearSolver - the linear solver to be used internally
* @param MaxIterations - maximum number of iterations to be employed in the nonlinear optimization process.
* - can also be set to 0 if a (very) rough approximation is enough
*
* EXAMPLE OF USAGE FROM PYTHON:
*
class distance_linear_solver_settings:
solver_type = "AMGCL"
tolerance = 1E-3
max_iteration = 200
scaling = False
krylov_type = "CG"
smoother_type = "SPAI0"
verbosity = 0
import linear_solver_factory
distance_linear_solver = linear_solver_factory.ConstructSolver(distance_linear_solver_settings)
max_iterations=1
distance_calculator = VariationalDistanceCalculationProcess2D(fluid_model_part, distance_linear_solver, max_iterations)
distance_calculator.Execute()
*/
VariationalDistanceCalculationProcess(
ModelPart& rBaseModelPart,
typename TLinearSolver::Pointer pLinearSolver,
unsigned int MaxIterations = 10,
Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
std::string AuxPartName = "RedistanceCalculationPart" )
:
mDistancePartIsInitialized(false),
mMaxIterations(MaxIterations),
mrModel( rBaseModelPart.GetModel() ),
mrBaseModelPart (rBaseModelPart),
mOptions( Options ),
mAuxModelPartName( AuxPartName )
{
KRATOS_TRY
ValidateInput();
// Generate an auxiliary model part and populate it with elements of type DistanceCalculationElementSimplex
ReGenerateDistanceModelPart(rBaseModelPart);
auto p_builder_solver = Kratos::make_shared<ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> >(pLinearSolver);
InitializeSolutionStrategy(p_builder_solver);
KRATOS_CATCH("")
}
/// Constructor with custom Builder And Solver
/** To be used in the trilinos version, since the trilinos builder and
* solver needs additional data (the EpetraComm).
* @param rBaseModelPart Reference ModelPart for distance calculation.
* @param pLinearSolver Linear solver for the distance system.
* @param MaxIterations Maximum number of non-linear optimization iterations.
* @param Options Configuration flags for the procedure.
* @param AuxPartName Name to be used for the internal distance calculation ModelPart.
*/
VariationalDistanceCalculationProcess(
ModelPart& rBaseModelPart,
typename TLinearSolver::Pointer pLinearSolver,
BuilderSolverPointerType pBuilderAndSolver,
unsigned int MaxIterations = 10,
Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(),
std::string AuxPartName = "RedistanceCalculationPart" )
:
mDistancePartIsInitialized(false),
mMaxIterations(MaxIterations),
mrModel( rBaseModelPart.GetModel() ),
mrBaseModelPart (rBaseModelPart),
mOptions( Options ),
mAuxModelPartName( AuxPartName )
{
KRATOS_TRY
ValidateInput();
// Generate an auxiliary model part and populate it with elements of type DistanceCalculationElementSimplex
ReGenerateDistanceModelPart(rBaseModelPart);
InitializeSolutionStrategy(pBuilderAndSolver);
KRATOS_CATCH("")
}
/// Destructor.
~VariationalDistanceCalculationProcess() override
{
Clear();
};
///@}
///@name Operators
///@{
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
void Execute() override
{
KRATOS_TRY;
if(mDistancePartIsInitialized == false){
ReGenerateDistanceModelPart(mrBaseModelPart);
}
ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
// TODO: check flag PERFORM_STEP1
// Step1 - solve a poisson problem with a source term which depends on the sign of the existing distance function
r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,1);
// Unfix the distances
const int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
double& d = it_node->FastGetSolutionStepValue(DISTANCE);
double& fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);
// Free the DISTANCE values
fix_flag = 1.0;
it_node->Free(DISTANCE);
// Save the distances
it_node->SetValue(DISTANCE, d);
if(d == 0){
d = 1.0e-15;
fix_flag = -1.0;
it_node->Fix(DISTANCE);
} else {
if(d > 0.0){
d = 1.0e15; // Set to a large number to make sure that the minimal distance is computed according to CalculateTetrahedraDistances
} else {
d = -1.0e15;
}
}
}
const int nelem = static_cast<int>(r_distance_model_part.NumberOfElements());
#pragma omp parallel for
for(int i_elem = 0; i_elem < nelem; ++i_elem){
auto it_elem = r_distance_model_part.ElementsBegin() + i_elem;
array_1d<double,TDim+1> distances;
auto& geom = it_elem->GetGeometry();
for(unsigned int i=0; i<TDim+1; i++){
distances[i] = geom[i].GetValue(DISTANCE);
}
const array_1d<double,TDim+1> original_distances = distances;
// The element is cut by the interface
if(this->IsSplit(distances)){
// Compute the unsigned distance using GeometryUtils
if (mOptions.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE)) {
GeometryUtils::CalculateExactDistancesToPlane(geom, distances);
}
else {
if(TDim==3){
GeometryUtils::CalculateTetrahedraDistances(geom, distances);
}
else {
GeometryUtils::CalculateTriangleDistances(geom, distances);
}
}
// Assign the sign using the original distance values
for(unsigned int i = 0; i < TDim+1; ++i){
if(original_distances[i] < 0){
distances[i] = -distances[i];
}
}
for(unsigned int i = 0; i < TDim+1; ++i){
double &d = geom[i].FastGetSolutionStepValue(DISTANCE);
double &fix_flag = geom[i].FastGetSolutionStepValue(FLAG_VARIABLE);
geom[i].SetLock();
if(std::abs(d) > std::abs(distances[i])){
d = distances[i];
}
fix_flag = -1.0;
geom[i].Fix(DISTANCE);
geom[i].UnSetLock();
}
}
}
// TODO: check whether anything else must be synchronized here, since the nodal DISTANCE values have been modified from the elements
this->SynchronizeFixity();
this->SynchronizeDistance();
// Compute the maximum and minimum distance for the fixed nodes
double max_dist = 0.0;
double min_dist = 0.0;
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
if(it_node->IsFixed(DISTANCE)){
const double& d = it_node->FastGetSolutionStepValue(DISTANCE);
if(d > max_dist){
max_dist = d;
}
if(d < min_dist){
min_dist = d;
}
}
}
// Synchronize the maximum and minimum distance values
const auto &r_communicator = r_distance_model_part.GetCommunicator().GetDataCommunicator();
max_dist = r_communicator.MaxAll(max_dist);
min_dist = r_communicator.MinAll(min_dist);
// Assign the max dist to all of the non-fixed positive nodes
// and the minimum one to the non-fixed negatives
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
if(!it_node->IsFixed(DISTANCE)){
double& d = it_node->FastGetSolutionStepValue(DISTANCE);
if(d>0){
d = max_dist;
} else {
d = min_dist;
}
}
}
mpSolvingStrategy->Solve();
// Step2 - minimize the target residual
r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,2);
for(unsigned int it = 0; it<mMaxIterations; it++){
mpSolvingStrategy->Solve();
}
// Unfix the distances
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = (r_distance_model_part.NodesBegin()) + i_node;
it_node->Free(DISTANCE);
}
KRATOS_CATCH("")
}
void Clear() override
{
if(mrModel.HasModelPart( mAuxModelPartName ))
mrModel.DeleteModelPart( mAuxModelPartName );
mDistancePartIsInitialized = false;
mpSolvingStrategy->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "VariationalDistanceCalculationProcess";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "VariationalDistanceCalculationProcess";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
bool mDistancePartIsInitialized;
unsigned int mMaxIterations;
Model& mrModel;
ModelPart& mrBaseModelPart;
Flags mOptions;
std::string mAuxModelPartName;
typename SolvingStrategyType::UniquePointer mpSolvingStrategy;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
void ValidateInput()
{
const DataCommunicator& r_comm = mrBaseModelPart.GetCommunicator().GetDataCommunicator();
int num_elements = mrBaseModelPart.NumberOfElements();
int num_nodes = mrBaseModelPart.NumberOfNodes();
if (num_elements > 0)
{
const auto geometry_family = mrBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily();
KRATOS_ERROR_IF( (TDim == 2) && (geometry_family != GeometryData::Kratos_Triangle) )
<< "In 2D the element type is expected to be a triangle." << std::endl;
KRATOS_ERROR_IF( (TDim == 3) && (geometry_family != GeometryData::Kratos_Tetrahedra) )
<< "In 3D the element type is expected to be a tetrahedron" << std::endl;
}
KRATOS_ERROR_IF(r_comm.SumAll(num_nodes) == 0) << "The model part has no nodes." << std::endl;
KRATOS_ERROR_IF(r_comm.SumAll(num_elements) == 0) << "The model part has no elements." << std::endl;
// Check that required nodal variables are present
VariableUtils().CheckVariableExists<Variable<double > >(DISTANCE, mrBaseModelPart.Nodes());
VariableUtils().CheckVariableExists<Variable<double > >(FLAG_VARIABLE, mrBaseModelPart.Nodes());
}
void InitializeSolutionStrategy(BuilderSolverPointerType pBuilderAndSolver)
{
// Generate a linear strategy
auto p_scheme = Kratos::make_shared< ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace,TDenseSpace > >();
ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
bool CalculateReactions = false;
bool ReformDofAtEachIteration = false;
bool CalculateNormDxFlag = false;
mpSolvingStrategy = Kratos::make_unique<ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver> >(
r_distance_model_part,
p_scheme,
pBuilderAndSolver,
CalculateReactions,
ReformDofAtEachIteration,
CalculateNormDxFlag);
// TODO: check flag DO_EXPENSIVE_CHECKS
mpSolvingStrategy->Check();
}
virtual void ReGenerateDistanceModelPart(ModelPart& rBaseModelPart)
{
KRATOS_TRY
if(mrModel.HasModelPart( mAuxModelPartName ))
mrModel.DeleteModelPart( mAuxModelPartName );
// Ensure that the nodes have distance as a DOF
VariableUtils().AddDof<Variable<double> >(DISTANCE, rBaseModelPart);
// Generate
ModelPart& r_distance_model_part = mrModel.CreateModelPart( mAuxModelPartName );
Element::Pointer p_distance_element = Kratos::make_intrusive<DistanceCalculationElementSimplex<TDim> >();
r_distance_model_part.GetNodalSolutionStepVariablesList() = rBaseModelPart.GetNodalSolutionStepVariablesList();
ConnectivityPreserveModeler modeler;
modeler.GenerateModelPart(rBaseModelPart, r_distance_model_part, *p_distance_element);
// Using the conditions to mark the boundary with the flag boundary
// Note that we DO NOT add the conditions to the model part
VariableUtils().SetFlag<ModelPart::NodesContainerType>(BOUNDARY, false, r_distance_model_part.Nodes());
// Note that above we have assigned the same geometry. Thus the flag is
// set in the distance model part despite we are iterating the base one
for (auto it_cond = rBaseModelPart.ConditionsBegin(); it_cond != rBaseModelPart.ConditionsEnd(); ++it_cond){
Geometry< Node<3> >& geom = it_cond->GetGeometry();
for(unsigned int i=0; i<geom.size(); i++){
geom[i].Set(BOUNDARY,true);
}
}
rBaseModelPart.GetCommunicator().SynchronizeOrNodalFlags(BOUNDARY);
mDistancePartIsInitialized = true;
KRATOS_CATCH("")
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
bool IsSplit(const array_1d<double,TDim+1> &rDistances){
unsigned int positives = 0, negatives = 0;
for(unsigned int i = 0; i < TDim+1; ++i){
if(rDistances[i] >= 0){
++positives;
} else {
++negatives;
}
}
if (positives > 0 && negatives > 0){
return true;
}
return false;
}
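// Example (hypothetical values): rDistances = {0.2, -0.1, 0.3} gives two positives and
// one negative, so the element is reported as split by the zero-distance interface.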
void SynchronizeDistance(){
ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
auto &r_communicator = r_distance_model_part.GetCommunicator();
// Only required in the MPI case
if(r_communicator.TotalProcesses() != 1){
int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
// Set the distance absolute value
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
it_node->FastGetSolutionStepValue(DISTANCE) = std::abs(it_node->FastGetSolutionStepValue(DISTANCE));
}
// Synchronize the unsigned value to minimum
r_communicator.SynchronizeCurrentDataToMin(DISTANCE);
// Set the distance sign again by retrieving it from the non-historical database
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
if(it_node->GetValue(DISTANCE) < 0.0){
it_node->FastGetSolutionStepValue(DISTANCE) = -it_node->FastGetSolutionStepValue(DISTANCE);
}
}
}
}
void SynchronizeFixity(){
ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName );
auto &r_communicator = r_distance_model_part.GetCommunicator();
// Only required in the MPI case
if(r_communicator.TotalProcesses() != 1){
int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes());
// Synchronize the fixity flag variable to minimum
// (-1.0 means fixed and 1.0 means free)
r_communicator.SynchronizeCurrentDataToMin(FLAG_VARIABLE);
// Set the fixity according to the synchronized flag
#pragma omp parallel for
for(int i_node = 0; i_node < nnodes; ++i_node){
auto it_node = r_distance_model_part.NodesBegin() + i_node;
const double &r_fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE);
if (r_fix_flag == -1.0){
it_node->Fix(DISTANCE);
}
}
}
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
VariationalDistanceCalculationProcess& operator=(VariationalDistanceCalculationProcess const& rOther);
/// Copy constructor.
//VariationalDistanceCalculationProcess(VariationalDistanceCalculationProcess const& rOther);
///@}
}; // Class VariationalDistanceCalculationProcess
//avoiding the use of the macro since this class has a template parameter. If there were no template, please use the KRATOS_CREATE_LOCAL_FLAG macro
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::PERFORM_STEP1(Kratos::Flags::Create(0));
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::DO_EXPENSIVE_CHECKS(Kratos::Flags::Create(1));
template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(2));
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::istream& operator >> (std::istream& rIStream,
VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis);
/// output stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::ostream& operator << (std::ostream& rOStream,
const VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED defined
|
x86_test_conv_common_utils.h | /* Copyright (c) 2016 Anakin Authors All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef ANAKIN_TEST_SABER_X86_TEST_CONV_COMMON_UTIL_H
#define ANAKIN_TEST_SABER_X86_TEST_CONV_COMMON_UTIL_H
template<DataType Dtype, typename LayoutType>
void compute_ref_conv_relu_fwd(
const std::vector<Tensor<X86, Dtype, LayoutType> *> inputs,
std::vector<Tensor<X86, Dtype, LayoutType> *> outputs,
ConvParam<Tensor<X86, Dtype, LayoutType>> *conv_param,
ActivationParam<Tensor<X86, Dtype, LayoutType>> *act_param){
typedef typename Tensor<X86, Dtype, LayoutType>::Dtype dtype;
auto src_data = reinterpret_cast<const dtype*>(inputs[0]->get_buf()-> get_data());
auto dst_data_ref = reinterpret_cast<dtype*>(outputs[0]->mutable_data());
auto weights_data = reinterpret_cast<const dtype*>(conv_param->weight()->get_buf()->get_data());
bool with_bias = conv_param->bias() ? true : false;
auto bias_data = reinterpret_cast<const dtype*>(conv_param -> bias() -> data());
Shape shape = conv_param->bias()->shape();
int mb_ = outputs[0] -> num();
int oc_ = outputs[0] -> channel();
int oh_ = outputs[0] -> height();
int ow_ = outputs[0] -> width();
int ic_ = inputs[0] -> channel();
int ih_ = inputs[0] -> height();
int iw_ = inputs[0] -> width();
int kh_ = conv_param -> weight() -> height();
int kw_ = conv_param -> weight() -> width();
int strh_ = conv_param -> stride_h;
int strw_ = conv_param -> stride_w;
int padh_ = conv_param -> pad_h;
int padw_ = conv_param -> pad_w;
int dilw_ = conv_param -> dilation_w;
int dilh_ = conv_param -> dilation_h;
dtype negative_slope = act_param -> negative_slope;
//#pragma omp parallel for collapse(4) schedule(static)
for (int n = 0; n < mb_; ++n) {
for (int oc = 0; oc < oc_; ++oc) {
for (int oh = 0; oh < oh_; ++oh) {
for (int ow = 0; ow < ow_; ++ow) {
int oidx = n * oc_ * oh_ * ow_
+ oc * oh_ * ow_ + oh * ow_ + ow;
dst_data_ref[oidx] = with_bias ? static_cast<dtype>(bias_data[oc]) : static_cast<dtype>(0);
for (int ic = 0; ic < ic_; ++ic){
for (int kh = 0; kh < kh_; ++kh) {
for (int kw = 0; kw < kw_; ++kw) {
int iw = ow * strw_ - padw_ + kw * ( dilw_);
int ih = oh * strh_ - padh_ + kh * ( dilh_);
if (iw < 0 || iw >= iw_) continue;
if (ih < 0 || ih >= ih_) continue;
int iidx = n * ic_ * ih_ * iw_ +
ic * ih_ * iw_ + ih * iw_ + iw;
int widx = oc * ic_ * kh_ * kw_ +
ic * kh_ * kw_ + kh * kw_ + kw;
dst_data_ref[oidx]
+= src_data[iidx]
* weights_data[widx];
}
}
}
if (dst_data_ref[oidx] < 0){
dst_data_ref[oidx] = static_cast<dtype>(
negative_slope * dst_data_ref[oidx]);
}
}
}
}
}
}
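// Reference formula computed above (convolution fused with leaky ReLU, hypothetical notation):
// dst[n,oc,oh,ow] = bias[oc] + sum_{ic,kh,kw} src[n,ic,ih,iw] * w[oc,ic,kh,kw],
// followed by dst = (dst < 0) ? negative_slope * dst : dst, with ih/iw derived from stride, pad and dilation.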
#endif //ANAKIN_TEST_SABER_X86_TEST_CONV_COMMON_UTIL_H |
mp4.c | //Transpose with critical section and no locks
#include<stdio.h>
#include<time.h>
#include<omp.h>
int main()
{
int a[5][5],b[5][5],c[5][5],temp=0,ch;
printf("Menu\n1.Express Mode\n2.Custom Mode\n");
printf("Enter your choice:");
scanf("%d",&ch);
if(ch == 1)
{
int l = 1;
for(int i=0;i<5;i++)
{
for(int j=0;j<5;j++)
{
a[i][j] = l;
b[i][j] = 1;
l++;
}
}
}else{
int k=1;
for(int i=0;i<5;i++)
{
for(int j=0;j<5;j++)
{
printf("Enter element %d of first matrix:",k);
scanf("%d",&a[i][j]);
k++;
}
}
k = 1;
for(int i=0;i<5;i++)
{
for(int j=0;j<5;j++)
{
printf("Enter element %d of second matrix:",k);
scanf("%d",&b[i][j]);
k++;
}
}
}
printf("\nThe First Matrix is:\n");
for(int i = 0; i < 5; i++)
{
for(int j = 0; j < 5; j++)
{
printf("%d\t", a[i][j]);
}
printf("\n");
}
printf("\nThe Second Matrix is:\n");
for(int i = 0; i < 5; i++)
{
for(int j = 0; j < 5; j++)
{
printf("%d\t", b[i][j]);
}
printf("\n");
}
clock_t begin = clock();
#pragma omp parallel num_threads(5)
{
#pragma omp for
for(int i = 0; i < 5; i++)
{
int id = omp_get_thread_num();
for(int j = 0; j < i; j++)
{
// omp atomic cannot guard a compound block; a critical section protects the three-statement swap
#pragma omp critical
{
temp = a[i][j];
a[i][j] = a[j][i];
a[j][i] = temp;
}
}
printf("Thread %d\n",id);
}
}
printf("\nTranspose of First Matrix:\n");
for(int i = 0; i < 5; i++)
{
for(int j = 0; j < 5; j++)
{
printf("%d\t", a[i][j]);
}
printf("\n");
}
#pragma omp parallel num_threads(5)
{
#pragma omp for
for(int i = 0; i < 5;i++)
{
int id = omp_get_thread_num();
for(int j = 0; j < 5;j++)
{
c[i][j] = a[i][j] + b[i][j];
}
printf("Thread %d\n",id);
}
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; // clock() accumulates CPU time across all threads, not wall time
printf("CPU Time used = %lf s", time_spent);
printf("\nSum Matrix Is:\n");
for(int i = 0; i < 5; i++)
{
for(int j = 0; j < 5; j++)
{
printf("%d\t", c[i][j]);
}
printf("\n");
}
}
|
openmp.c | /*
* Copyright (c) 2003, 2007-14 Matteo Frigo
* Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/* openmp.c: thread spawning via OpenMP */
#include "threads.h"
#if !defined(_OPENMP)
#error OpenMP enabled but not using an OpenMP compiler
#endif
int X(ithreads_init)(void)
{
return 0; /* no error */
}
/* Distribute a loop from 0 to loopmax-1 over nthreads threads.
proc(d) is called to execute a block of iterations from d->min
to d->max-1. d->thr_num indicate the number of the thread
that is executing proc (from 0 to nthreads-1), and d->data is
the same as the data parameter passed to X(spawn_loop).
This function returns only after all the threads have completed. */
void X(spawn_loop)(int loopmax, int nthr, spawn_function proc, void *data)
{
int block_size;
spawn_data d;
int i;
A(loopmax >= 0);
A(nthr > 0);
A(proc);
if (!loopmax) return;
/* Choose the block size and number of threads in order to (1)
minimize the critical path and (2) use the fewest threads that
achieve the same critical path (to minimize overhead).
e.g. if loopmax is 5 and nthr is 4, we should use only 3
threads with block sizes of 2, 2, and 1. */
block_size = (loopmax + nthr - 1) / nthr;
nthr = (loopmax + block_size - 1) / block_size;
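/* worked instance of the comment above: loopmax = 5, nthr = 4 gives
block_size = (5 + 4 - 1) / 4 = 2 and nthr = (5 + 2 - 1) / 2 = 3,
i.e. three threads with blocks of 2, 2 and 1 iterations */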
THREAD_ON; /* prevent debugging mode from failing under threads */
#pragma omp parallel for private(d)
for (i = 0; i < nthr; ++i) {
d.max = (d.min = i * block_size) + block_size;
if (d.max > loopmax)
d.max = loopmax;
d.thr_num = i;
d.data = data;
proc(&d);
}
THREAD_OFF; /* prevent debugging mode from failing under threads */
}
void X(threads_cleanup)(void)
{
}
/* FIXME [Matteo Frigo 2015-05-25] What does "thread-safe"
mean for openmp? */
void X(threads_register_planner_hooks)(void)
{
}
|
GB_unop__asin_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__asin_fp32_fp32
// op(A') function: GB_unop_tran__asin_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = asinf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = asinf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = asinf (z) ; \
}
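// Illustrative expansion (not part of the generated code): after folding the temporary
// aij/z copies, GB_CAST_OP(pC,pA) above reduces to Cx [pC] = asinf (Ax [pA]).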
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__asin_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = asinf (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__asin_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
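// A minimal usage sketch (illustrative, not part of the original header):
// operator[] goes through the single-element cache, so repeated lookups for
// the same FileID are cheap, and switching to a different FileID flushes the
// cached entry back into the underlying DenseMap. Here `FID` is an assumed,
// previously obtained FileID:
//
//   FileNullabilityMap NullMap;
//   FileNullability &FN = NullMap[FID]; // miss: entry pulled into the cache
//   FN.SawTypeNullability = true;
//   NullMap[FID].PointerKind = 0;       // hit: same FileID, served from cache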
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical with respect to field types, order, and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_specialization_constants_buffer,
kind_stream,
kind_last = kind_stream
};
public:
SYCLIntegrationHeader(Sema &S);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(StringRef MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(const FunctionDecl *SyclKernel, QualType KernelNameType,
SourceLocation Loc, bool IsESIMD, bool IsUnnamedKernel);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
/// Registers a specialization constant to emit info for it into the header.
void addSpecConstant(StringRef IDName, QualType IDType);
/// Update the names of a kernel description based on its SyclKernel.
void updateKernelNames(const FunctionDecl *SyclKernel, StringRef Name,
StringRef StableName) {
auto Itr = llvm::find_if(KernelDescs, [SyclKernel](const KernelDesc &KD) {
return KD.SyclKernel == SyclKernel;
});
assert(Itr != KernelDescs.end() && "Unknown kernel description");
Itr->updateKernelNames(Name, StableName);
}
/// Note which free functions (this_id, this_item, etc.) are called within
/// the kernel.
void setCallsThisId(bool B);
void setCallsThisItem(bool B);
void setCallsThisNDItem(bool B);
void setCallsThisGroup(bool B);
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_scalar or kind_struct, then
// denotes parameter size in bytes (includes padding for structs)
// If Kind is kind_accessor
// denotes access target; possible access targets are defined in
// access/access.hpp
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// there are four free functions the kernel may call (this_id, this_item,
// this_nd_item, this_group)
struct KernelCallsSYCLFreeFunction {
bool CallsThisId = false;
bool CallsThisItem = false;
bool CallsThisNDItem = false;
bool CallsThisGroup = false;
};
// Kernel invocation descriptor
struct KernelDesc {
/// sycl_kernel function associated with this kernel.
const FunctionDecl *SyclKernel;
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
SourceLocation KernelLocation;
/// Whether this kernel is an ESIMD one.
bool IsESIMDKernel;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
// Whether kernel calls any of the SYCL free functions (this_item(),
// this_id(), etc)
KernelCallsSYCLFreeFunction FreeFunctionCalls;
// If we are in unnamed kernel/lambda mode AND this is one that the user
// hasn't provided an explicit name for.
bool IsUnnamedKernel;
KernelDesc(const FunctionDecl *SyclKernel, QualType NameType,
SourceLocation KernelLoc, bool IsESIMD, bool IsUnnamedKernel)
: SyclKernel(SyclKernel), NameType(NameType), KernelLocation(KernelLoc),
IsESIMDKernel(IsESIMD), IsUnnamedKernel(IsUnnamedKernel) {}
void updateKernelNames(StringRef Name, StringRef StableName) {
this->Name = Name.str();
this->StableName = StableName.str();
}
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
using SpecConstID = std::pair<QualType, std::string>;
/// Keeps specialization constants encountered in the translation unit. Maps spec
/// constant's ID type to generated unique name. Duplicates are removed at
/// integration header emission time.
llvm::SmallVector<SpecConstID, 4> SpecConsts;
Sema &S;
};
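// Illustrative call sequence for building one kernel invocation descriptor
// (a sketch, not from the original header; `IntHeader`, `KernelFD`, `NameTy`,
// and `Loc` are assumed to be in scope):
//
//   IntHeader.startKernel(KernelFD, NameTy, Loc, /*IsESIMD=*/false,
//                         /*IsUnnamedKernel=*/false);
//   IntHeader.addParamDesc(SYCLIntegrationHeader::kind_std_layout,
//                          /*Info=*/sizeof(int), /*Offset=*/0);
//   IntHeader.endKernel();
//
// Descriptors added between startKernel() and endKernel() land in the
// KernelDesc most recently appended to KernelDescs.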
class SYCLIntegrationFooter {
public:
SYCLIntegrationFooter(Sema &S) : S(S) {}
bool emit(StringRef MainSrc);
void addVarDecl(const VarDecl *VD);
private:
bool emit(raw_ostream &O);
Sema &S;
llvm::SmallVector<const VarDecl *> SpecConstants;
void emitSpecIDName(raw_ostream &O, const VarDecl *VD);
};
/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token; all functions that update or
/// consume the type take the start location of the token they are looking at
/// as a parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Handles e.g. BaseType{ .D = Tok...
void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
const Designation &D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: it stores a
/// function_ref, so clients must make sure that all calls to get() with the
/// same location happen while the function_ref is alive.
///
/// The callback should also emit signature help as a side-effect, but only
/// if the completion point has been reached.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Get the expected type associated with this location, if any.
///
/// If the location is a function argument, determining the expected type
/// involves considering all function overloads and the arguments so far.
/// In this case, signature help for these function overloads will be reported
/// as a side-effect (only if the completion point has been reached).
QualType get(SourceLocation Tok) const {
if (!Enabled || Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
bool Enabled;
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
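// A usage sketch (illustrative; `S` is a Sema instance and `RetLoc` an
// assumed SourceLocation): the parser records the expected type on entering
// a construct, and code completion later queries it at the same token:
//
//   PreferredTypeBuilder PreferredType(/*Enabled=*/true);
//   PreferredType.enterReturn(S, RetLoc); // expect the return type here
//   ...
//   QualType Expected = PreferredType.get(RetLoc); // null if not tracked
//
// get() returns QualType() unless asked about the exact token location for
// which a type (or a ComputeType callback) was recorded.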
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See the comment in the overload below for why it's safe to compute
// the linkage of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate it here
/// because that allows us to avoid duplicating the constant throughout clang
/// code, which we would otherwise have to do since we can't directly use the
/// llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 32;
static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
// `Native` represents default align mode, which may vary based on the
// platform.
enum Mode : unsigned char { Native, Natural, Packed, Mac68k };
// #pragma pack info constructor
AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
: PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
assert(Num == PackNumber && "The pack number has been truncated.");
}
// #pragma align info constructor
AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
: PackAttr(false), AlignMode(M),
PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}
explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}
AlignPackInfo() : AlignPackInfo(Native, false) {}
// When an AlignPackInfo itself cannot be used, this returns a 32-bit
// integer encoding for it. This should only be passed to
// AlignPackInfo::getFromRawEncoding, it should not be inspected directly.
static uint32_t getRawEncoding(const AlignPackInfo &Info) {
std::uint32_t Encoding{};
if (Info.IsXLStack())
Encoding |= IsXLMask;
Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
if (Info.IsPackAttr())
Encoding |= PackAttrMask;
Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
return Encoding;
}
static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
bool IsXL = static_cast<bool>(Encoding & IsXLMask);
AlignPackInfo::Mode M =
static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
int PackNumber = (Encoding & PackNumMask) >> 4;
if (Encoding & PackAttrMask)
return AlignPackInfo(M, PackNumber, IsXL);
return AlignPackInfo(M, IsXL);
}
bool IsPackAttr() const { return PackAttr; }
bool IsAlignAttr() const { return !PackAttr; }
Mode getAlignMode() const { return AlignMode; }
unsigned getPackNumber() const { return PackNumber; }
bool IsPackSet() const {
// #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
// attribute on a decl.
return PackNumber != UninitPackVal && PackNumber != 0;
}
bool IsXLStack() const { return XLStack; }
bool operator==(const AlignPackInfo &Info) const {
return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
Info.XLStack);
}
bool operator!=(const AlignPackInfo &Info) const {
return !(*this == Info);
}
private:
/// \brief True if this is a pragma pack attribute,
/// not a pragma align attribute.
bool PackAttr;
/// \brief The alignment mode that is in effect.
Mode AlignMode;
/// \brief The pack number of the stack.
unsigned char PackNumber;
/// \brief True if it is a XL #pragma align/pack stack.
bool XLStack;
/// \brief Uninitialized pack value.
static constexpr unsigned char UninitPackVal = -1;
// Masks to encode and decode an AlignPackInfo.
static constexpr uint32_t IsXLMask{0x0000'0001};
static constexpr uint32_t AlignModeMask{0x0000'0006};
static constexpr uint32_t PackAttrMask{0x0000'0008};
static constexpr uint32_t PackNumMask{0x0000'01F0};
};
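// Round-trip sketch of the raw encoding (illustrative only). Per the masks
// above, bit 0 holds the XL flag, bits 1-2 the align mode, bit 3 the
// pack-attribute flag, and bits 4-8 the pack number, so encode and decode
// are inverses:
//
//   AlignPackInfo Info(AlignPackInfo::Packed, /*Num=*/8, /*IsXL=*/false);
//   uint32_t Raw = AlignPackInfo::getRawEncoding(Info);
//   assert(AlignPackInfo::getFromRawEncoding(Raw) == Info);
//
// This is what lets AlignPackInfo values travel through interfaces that can
// only carry a plain integer.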
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
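// A sketch of driving Act() (illustrative; `Loc` is an assumed
// SourceLocation and `Lit` an assumed StringLiteral*, mirroring how the
// segment stacks below are used):
//
//   PragmaStack<StringLiteral *> Seg(/*Default=*/nullptr);
//   Seg.Act(Loc, PSK_Push_Set, "label", Lit); // save old value, install Lit
//   Seg.Act(Loc, PSK_Pop, "label", nullptr);  // pop back to the saved value
//
// PSK_Push_Set combines the push and set bits, so a single call both pushes
// the current value and installs the new one.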
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FpPragmaStack.CurrentValue;
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members and the locations of delete-expressions
/// for which it could not be proven whether they mismatch with the
/// new-expression used in the field's initializer.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
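// Typical save/restore pattern (a sketch; `S` is a Sema instance and `Pool`
// a caller-owned sema::DelayedDiagnosticPool):
//
//   Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse a declaration; access/deprecation diags collect in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);
//
// popWithoutEmitting() only restores the previous pool; emitting or
// discarding what was collected remains the caller's responsibility.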
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
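// Usage sketch (illustrative; `S` is a Sema instance and `FD` an assumed
// FunctionDecl*, which is a DeclContext): enter a context for a region of
// work and let scope exit restore everything the constructor saved:
//
//   {
//     Sema::ContextRAII SavedCtx(S, FD);
//     // ... work with S.CurContext == FD ...
//   } // destructor pops back to the previous context
//
// pop() can also be called explicitly before destruction; it is idempotent
// because it nulls out SavedContext.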
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
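// Usage sketch (illustrative; `S` and `Ctor` are an assumed Sema instance
// and CXXConstructorDecl*, and `UseLoc` an assumed SourceLocation):
//
//   Sema::SynthesizedFunctionScope Scope(S, Ctor);
//   Scope.addContextNote(UseLoc); // record where the synthesis was required
//   // ... build the body; expressions are PotentiallyEvaluated here ...
//
// The destructor pops the code-synthesis context (if one was pushed), then
// the expression evaluation context and the function scope.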
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The namespace where coroutine components are defined. In the standard
/// they are defined in the std namespace, while the previous implementation
/// defined them in the std::experimental namespace.
NamespaceDecl *CoroTraitsNamespaceCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// In addition to being constant evaluated, the current expression
/// occurs in an immediate function context - either a consteval function
/// or a consteval if statement.
ImmediateFunctionContext,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated ||
Context == ExpressionEvaluationContext::ImmediateFunctionContext;
}
bool isImmediateFunctionContext() const {
return Context == ExpressionEvaluationContext::ImmediateFunctionContext;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
const TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
class GlobalMethodPool {
public:
using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
using iterator = llvm::DenseMap<Selector, Lists>::iterator;
iterator begin() { return Methods.begin(); }
iterator end() { return Methods.end(); }
iterator find(Selector Sel) { return Methods.find(Sel); }
std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
return Methods.insert(Val);
}
int count(Selector Sel) const { return Methods.count(Sel); }
bool empty() const { return Methods.empty(); }
private:
llvm::DenseMap<Selector, Lists> Methods;
};
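// Lookup sketch (illustrative; `Pool` is a GlobalMethodPool and `Sel` an
// assumed Selector). Each selector maps to a pair of ObjCMethodLists; by
// Clang convention (an assumption here, not stated in this declaration) the
// first list holds instance methods and the second factory methods:
//
//   GlobalMethodPool::iterator It = Pool.find(Sel);
//   if (It != Pool.end()) {
//     ObjCMethodList &InstanceList = It->second.first;
//     ObjCMethodList &FactoryList = It->second.second;
//   }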
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
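// Usage sketch (illustrative; `S` is a Sema instance): instantiate on entry
// to a compound statement so that any floating-point pragma state changed
// inside it is undone on exit:
//
//   {
//     Sema::FPFeaturesStateRAII SavedFP(S);
//     // ... parse the compound statement; S.CurFPFeatures may change ...
//   } // CurFPFeatures and FpPragmaStack.CurrentValue are restored here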
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform);
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
// in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
~ImmediateDiagBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First clear the diagnostic
// builder itself so it won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
return *this;
}
};
/// Bitmask of the reasons a single diagnostic should be emitted, based on
/// its language. This permits multiple offload systems to coexist in the
/// same translation unit.
enum class DeviceDiagnosticReason {
/// Diagnostic doesn't apply to anything. Included for completeness, but
/// using it should make emission a no-op.
None = 0,
/// OpenMP specific diagnostic.
OmpDevice = 1 << 0,
OmpHost = 1 << 1,
OmpAll = OmpDevice | OmpHost,
/// CUDA specific diagnostics.
CudaDevice = 1 << 2,
CudaHost = 1 << 3,
CudaAll = CudaDevice | CudaHost,
/// SYCL specific diagnostic.
Sycl = 1 << 4,
/// ESIMD specific diagnostic.
Esimd = 1 << 5,
/// A flag representing 'all'. This can be used to skip the check
/// altogether and make this behave as it did before the
/// DiagnosticReason was added (that is, unconditionally emit).
/// Note: This needs to be updated if any flags above are added.
All = OmpAll | CudaAll | Sycl | Esimd,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/All)
};
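// A minimal sketch of how the bitmask is meant to combine, e.g. for a
// diagnostic relevant to both OpenMP device code and SYCL code:
//
//   DeviceDiagnosticReason R =
//       DeviceDiagnosticReason::OmpDevice | DeviceDiagnosticReason::Sycl;
//   if ((R & DeviceDiagnosticReason::Sycl) != DeviceDiagnosticReason::None)
//     ; // the diagnostic applies when compiling the SYCL device half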
private:
// A collection of pairs, each holding an undefined function and its caller,
// known to be reachable from a routine on the device (kernel or device
// function).
typedef std::pair<const FunctionDecl *, const FunctionDecl *> CallPair;
llvm::SmallVector<CallPair> UndefinedReachableFromSyclDevice;
public:
// Helper routine to add a Callee-Caller pair of FunctionDecl * to
// UndefinedReachableFromSyclDevice.
void addFDToReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
UndefinedReachableFromSyclDevice.push_back(std::make_pair(Callee, Caller));
}
// Helper routine to check whether a Callee-Caller pair of FunctionDecl *
// is in UndefinedReachableFromSyclDevice.
bool isFDReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
return llvm::any_of(UndefinedReachableFromSyclDevice,
[Callee, Caller](const CallPair &P) {
return P.first == Callee && P.second == Caller;
});
}
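// A minimal usage sketch of the two helpers above, assuming Callee was found
// undefined while checking a SYCL kernel reached through Caller:
//
//   if (!isFDReachableFromSyclDevice(Callee, Caller))
//     addFDToReachableFromSyclDevice(Callee, Caller); // record it once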
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S, DeviceDiagnosticReason R);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second
<< std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second.AddFixItHint(
Hint);
}
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
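// A minimal construction sketch, assuming Fn is the function a deferred
// diagnostic would attach to; K_Deferred only fires if Fn is codegen'ed
// (SomeArg is a placeholder for a diagnostic argument):
//
//   SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Deferred, Loc, DiagID, Fn,
//                         *this, DeviceDiagnosticReason::CudaDevice)
//       << SomeArg;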
/// Whether the last error-level diagnostic was immediate. This is used to
/// determine whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;
/// RAII class to control scope of DeferDiags.
class DeferDiagsRAII {
Sema &S;
bool SavedDeferDiags = false;
public:
DeferDiagsRAII(Sema &S, bool DeferDiags)
: S(S), SavedDeferDiags(S.DeferDiags) {
S.DeferDiags = DeferDiags;
}
~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};
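// A minimal usage sketch: defer deferrable diagnostics for one scope, then
// restore the previous setting automatically:
//
//   {
//     DeferDiagsRAII DD(*this, /*DeferDiags=*/true);
//     // ... diagnostics emitted here may be deferred ...
//   } // DeferDiags restored here.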
/// Whether an uncompilable error has occurred. This includes errors that
/// occur in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
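// A minimal sketch of where each fragment falls in a C++20 module unit:
//
//   module;              // TUFragmentKind::Global starts here
//   #include <cassert>
//   export module M;     // TUFragmentKind::Normal starts here
//   export int f();
//   module :private;     // TUFragmentKind::Private starts here
//   int f() { return 42; }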
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
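// A minimal usage sketch: the returned smart pointer keeps the popped scope
// alive briefly, and the custom deleter reclaims it when the pointer dies:
//
//   {
//     PoppedFunctionScopePtr Popped = PopFunctionScopeInfo();
//     // ... final checks against the popped FunctionScopeInfo ...
//   } // PoppedFunctionScopeDeleter runs here.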
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
SYCLIntelFPGALoopCountAttr *
BuildSYCLIntelFPGALoopCountAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *
BuildSYCLIntelFPGAInitiationIntervalAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAMaxConcurrencyAttr *
BuildSYCLIntelFPGAMaxConcurrencyAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAMaxInterleavingAttr *
BuildSYCLIntelFPGAMaxInterleavingAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGASpeculatedIterationsAttr *
BuildSYCLIntelFPGASpeculatedIterationsAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGALoopCoalesceAttr *
BuildSYCLIntelFPGALoopCoalesceAttr(const AttributeCommonInfo &CI, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
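// A minimal sketch of how a bound diagnoser fires, assuming S is a Sema
// instance and DiagID expects one extra argument before the trailing type:
//
//   BoundTypeDiagnoser<DeclarationName> Diagnoser(DiagID, Name);
//   Diagnoser.diagnose(S, Loc, T); // streams Name, then T, into DiagID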
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
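// A minimal usage sketch of the DiagID-taking overload, assuming a diagnostic
// ID such as diag::err_incomplete_type whose argument is the type:
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return true; // T was incomplete; the diagnostic was already emitted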
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
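// A minimal dispatch sketch over a classification result (CurS and Tok are
// assumed to be the current Scope and the lookahead Token; the handlers are
// hypothetical):
//
//   NameClassification NC = ClassifyName(CurS, SS, Name, NameLoc, Tok);
//   switch (NC.getKind()) {
//   case NC_Type:    return handleType(NC.getType());        // hypothetical
//   case NC_NonType: return handleDecl(NC.getNonTypeDecl()); // hypothetical
//   default:         break;
//   }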
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
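// A minimal usage sketch at a '<' that failed to parse as a comparison,
// pairing the check above with the diagnostic below:
//
//   bool Dependent = false;
//   if (mightBeIntendedToBeTemplateName(E, Dependent))
//     diagnoseExprIntendedAsTemplateName(CurS, E, LessLoc, GreaterLoc);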
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
void warnOnReservedIdentifier(const NamedDecl *D);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose when a parameter or the return value of a function or Obj-C
/// method definition is passed by value and its size is larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, this actually parses the body and rejects or errors out
/// on a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the Objective-C container
/// scope for parsing or looking up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
/// Merge availability attributes for an implementation of
/// an optional protocol requirement.
AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher-priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but should instead be treated as plain
/// integer values. This enumeration just names the priority weights that are
/// used to calculate that final value. (A worked example follows the
/// enumeration below.)
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
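// Worked example (an illustrative sketch, not part of the original header):
// a declaration that already carries an explicitly written availability
// attribute has priority AP_Explicit (0). Applying the same attribute via
// '#pragma clang attribute' contributes AP_PragmaClangAttribute (1), a
// higher (weaker) priority, so it does not replace the explicit attribute.
// Conversely, an explicit attribute does replace one that was previously
// inferred from another platform (AP_InferredFromOtherPlatform, 2), since
// the explicit attribute's priority value is lower (stronger).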
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef NewUserDiagnostic);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
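// Illustrative mapping (a sketch, not part of the original header):
//   x = y;       // AA_Assigning
//   f(y);        // AA_Passing
//   return y;    // AA_Returning
//   (T)y;        // AA_Casting
//   T x = y;     // AA_Initializing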
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
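// Illustrative examples (a sketch, not part of the original header):
//   void f(int); void f(double);  // Ovl_Overload: distinct signatures
//   void f(int); void f(int);     // Ovl_Match: signature exactly matches
//   int g;       void g(int);     // Ovl_NonFunction: lookup found a variable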
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
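// Illustrative sketch (an assumed example, not part of the original header):
//   struct S { explicit S(int); explicit operator bool(); };
// With AllowedExplicit::None neither member is usable implicitly; with
// AllowedExplicit::Conversions 'operator bool' may be used but the explicit
// constructor may not; AllowedExplicit::All permits both.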
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
CCEK_Noexcept ///< Condition in a noexcept(bool) specifier.
};
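// Illustrative contexts (a sketch, not part of the original header):
//   case 2 + 3:              // CCEK_CaseValue
//   enum E : char { e = V }; // CCEK_Enumerator (fixed underlying type)
//   A<N * 2> a;              // CCEK_TemplateArg (non-type template parameter)
//   new int[N];              // CCEK_ArrayBound
//   explicit(B)              // CCEK_ExplicitBool
//   noexcept(B)              // CCEK_Noexcept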
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
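// Illustrative Objective-C literals (a sketch, not part of the original
// header):
//   @[ @1, @2 ]        // LK_Array
//   @{ @"k" : @"v" }   // LK_Dictionary
//   @42                // LK_Numeric
//   @(x)               // LK_Boxed
//   @"text"            // LK_String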
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-template overload
// candidates identified by the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
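// Illustrative sketch (an assumed example, not part of the original header):
//   struct stat { int st_mode; };
//   int stat(const char *path, struct stat *buf);
// In C, the call 'stat(p, &s)' finds the function via LookupOrdinaryName,
// while 'struct stat s;' finds the type via LookupTagName; the two names
// live in different identifier namespaces and do not conflict.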
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
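// Illustrative alternatives for a literal suffix (a sketch, not part of the
// original header); for the token 123_u:
//   unsigned operator""_u(unsigned long long);  // LOLR_Cooked: receives 123
//   unsigned operator""_u(const char *);        // LOLR_Raw: receives "123"
//   template <char...> unsigned operator""_u(); // LOLR_Template: receives
//                                               // '1','2','3' as a pack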
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of function emission with respect to the CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
DeviceDiagnosticReason getEmissionReason(const FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if all
/// typos were successfully corrected, or ExprError if one or more typos
/// could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
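// Minimal usage sketch (hypothetical caller, not part of the original
// header):
//   ExprResult Res = BuildSomeExpr(...);  // BuildSomeExpr is hypothetical;
//                                         // the result may contain TypoExprs
//   Res = CorrectDelayedTyposInExpr(Res, /*InitDecl=*/nullptr,
//                                   /*RecoverUncorrectedTypos=*/true);
//   // On success every TypoExpr has been replaced by its correction (or a
//   // RecoveryExpr); otherwise Res is an ExprError.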
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration exactly matches its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property must not have one of a user-declared
/// setter/getter pair without the other.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in
/// a category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method, or only one method, is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
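// Illustrative sketch (names like 'SemaRef' and 'Sel' are assumed here, not
// defined in this header): when type-checking a message sent to a receiver of
// type 'id', no single interface can be consulted, so Sema falls back to the
// global method pool:
//   ObjCMethodDecl *M =
//       SemaRef.LookupInstanceMethodInGlobalPool(Sel, SourceRange(),
//                                                /*receiverIdOrClass=*/true);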
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
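// Illustrative sketch (the call site and 'SemaRef' are assumed): a parser
// action would typically wrap a condition or increment expression before
// handing it to a statement-building callback:
//   FullExprArg Third = SemaRef.MakeFullExpr(IncExpr);
//   // equivalent to SemaRef.MakeFullExpr(IncExpr, IncExpr->getExprLoc())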
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
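// Illustrative usage (the enclosing function and 'SemaRef' are assumed):
//   {
//     Sema::CompoundScopeRAII CompoundScope(SemaRef);
//     // ... act on the statements of the compound statement ...
//   } // the destructor runs ActOnFinishOfCompoundStmt() here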
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
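// Illustrative usage (assumed call site): push a function scope, let the RAII
// object pop it on early error returns, and disable it on the success path
// where the scope is consumed elsewhere:
//   FunctionScopeRAII PopOnError(SemaRef);
//   if (SomeCheckFails)
//     return StmtError(); // scope popped by ~FunctionScopeRAII
//   PopOnError.disable(); // success: keep the function scope pushed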
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
struct NamedReturnInfo {
const VarDecl *Candidate;
enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
Status S;
bool isMoveEligible() const { return S != None; }
bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
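// A sketch of the distinction in user code, for illustration:
//   std::string f() {
//     std::string s;
//     return s;  // 's' is move-eligible and copy-elidable (NRVO applies)
//   }
//   std::string g(std::string p) {
//     return p;  // a parameter is move-eligible but never copy-elidable
//   }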
enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
NamedReturnInfo getNamedReturnInfo(
Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType);
ExprResult
PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const NamedReturnInfo &NRInfo, Expr *Value,
bool SuppressSimplerImplicitMoves = false);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo,
bool SuppressSimplerImplicitMoves);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If the declaration is a file-scoped decl that must be warned about when
/// unused, keep track of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Diagnose a parameter or variable \p VD that is set but never otherwise
/// used.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its body \p Body, located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
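// For example, in user code:
//   std::vector<int> v;
//   v = std::move(v); // diagnosed under -Wself-move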
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
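// The capture kinds correspond to user code such as (illustrative):
//   int x = 0;
//   auto a = [x]  { return x; }; // TryCapture_ExplicitByVal
//   auto b = [&x] { ++x; };      // TryCapture_ExplicitByRef
//   auto c = [=]  { return x; }; // TryCapture_Implicit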
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false,
ArrayRef<const Expr *> StopAt = None);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the statements' reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
const PartialDiagnostic &PD);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
ParsedType ParsedTy);
ExprResult BuildSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
ExprResult ActOnSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
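// For example (illustrative user code, 'Handle' and 'Widget' are assumed),
// given a class with a custom operator->:
//   struct Handle { Widget *operator->(); };
//   Handle h;
//   h.size(); // no member 'size' in Handle; Sema can retry as h->size()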
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
MultiExprArg CallArgs);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
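// For illustration, the call
//   __builtin_offsetof(struct S, a.b[4].c)
// decomposes into four components: .a, .b, [4], .c; the bracketed component
// has isBrackets == true and stores its index expression in U.E, while the
// others store their identifiers in U.IdentInfo.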
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
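// Illustrative user code for the dependent case:
//   template <typename T> void f() {
//     __if_exists(T::size) {
//       // within a template the check yields IER_Dependent; the result can
//       // differ from one instantiation to the next
//     }
//   }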
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache recording whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc,
const LookupResult *R = nullptr,
const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation,
bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
// C++11 [except.spec]p14:
//   The exception-specification is noexcept(false) if the set of
//   potential exceptions of the special member function contains "any".
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
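// Minimal usage sketch (assumed call site): integrate each called special
// member and read back the computed specification:
//   ImplicitExceptionSpecification Spec(SemaRef);
//   Spec.CalledDecl(Loc, BaseDestructor); // each call may only loosen ComputedEST
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();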
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
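// For illustration: given user code
//   struct Guard { ~Guard(); };
// the full-expression 'Guard()' produces a temporary whose non-trivial
// destructor must run at the end of the full-expression, so the expression
// is wrapped in a CXXBindTemporaryExpr by MaybeBindToTemporary.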
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
// Checks whether the vector type should be initialized from a scalar
// by splatting the value rather than populating a single element.
// This is the case for AltiVecVector types as well as for AltiVecPixel
// and AltiVecBool when -faltivec-src-compat=xl is specified.
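// For example (illustrative), with the AltiVec extension enabled:
//   vector int v = (vector int)7;  // 7 is splatted into every element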
bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);
// Checks if the -faltivec-src-compat=gcc option is specified.
// If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
// treated the same way GCC treats them when such vectors are
// initialized from a scalar (an error is emitted).
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
QualType SrcTy);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast expressions.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
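///
/// For example:
/// \code
///   template <typename... Ts> auto sum(Ts... ts) { return (ts + ...); }
/// \endcode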
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
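///
/// For example, within a member function of 'struct X' declared 'const',
/// the type of 'this' is 'const X *'.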
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
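///
/// For example (illustrative):
/// \code
///   struct S { int n; int f() { return [this] { return n; }(); } };
/// \endcode
/// Building the lambda body requires capturing 'this' at the use of 'n'.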
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
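///
/// For example (illustrative):
/// \code
///   struct T { static void *operator new(std::size_t); };
///   T *a = new T;    // class scope is searched first: finds T::operator new
///   T *b = ::new T;  // '::new' restricts lookup to the global scope
/// \endcode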
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to true
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and apply any
/// implicit conversions (such as an lvalue-to-rvalue conversion) when it
/// is not being used to initialize a reference.
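///
/// For example, in '[x = 42] {}' this analyzes the initializer '42' and
/// deduces 'int' as the type of the synthesized capture variable 'x'.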
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
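///
/// For example, for a captureless '[](int x) { return x; }', the conversion
/// function returns a value of type 'int (*)(int)'.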
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// If it is not, a diagnostic is emitted, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
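///
/// For example (illustrative):
/// \code
///   template <typename T> requires (sizeof(T) > 4) void f(); // OK
///   template <typename T> requires sizeof(T) > 4 void g();   // not primary
/// \endcode
/// In 'g', 'sizeof(T) > 4' is not a primary-expression, so the check fails
/// and PossibleNonPrimary is set to suggest parenthesization.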
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2 but would
/// have been had a pair of the atomic constraints involved been declared in
/// a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
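///
/// For example (illustrative):
/// \code
///   template <typename T> requires (sizeof(T) > 4) void f();          // #1
///   template <typename T> requires (sizeof(T) > 4) && true void f();  // #2
/// \endcode
/// The atomic constraint '(sizeof(T) > 4)' is spelled twice, so #2 does not
/// subsume #1; naming it in a concept shared by both would make it do so.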
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
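///
/// For example:
/// \code
///   struct B { virtual B *clone(); };
///   struct D : B { D *clone() override; };  // covariant return type: OK
/// \endcode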
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of the base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
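///
/// For example:
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); };  // error: overrides a 'final' function
/// \endcode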
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
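///
/// For example (illustrative):
/// \code
///   template <typename T> struct X { X(T); };
///   X(const char *) -> X<std::string>;  // deduction-guide named 'X'
/// \endcode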
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc,
bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
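/// For example (illustrative), given:
/// \code
/// template<typename T> constexpr T pi = T(3.1415926);
/// double d = pi<double>; // specialization of the variable template 'pi'
/// \endcode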
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
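// For example (illustrative), parsing the concept definition
//   template<typename T>
//   concept Addable = requires(T a, T b) { a + b; };
// exercises ActOnConceptDefinition, ActOnStartRequiresExpr,
// ActOnSimpleRequirement, and ActOnRequiresExpr below.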
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
/// A requires-clause.
UPPC_RequiresClause,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
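/// For example (illustrative), \c UPPC_DataMemberType covers:
/// \code
/// template<typename ...Ts> struct X {
///   Ts member; // error: 'Ts' is an unexpanded parameter pack
/// };
/// \endcode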
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
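/// For example (illustrative):
/// \code
/// template<typename ...Ts> struct D : Ts... {}; // 'Ts...' is a pack expansion
/// \endcode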
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
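/// For example (illustrative), expansion fails when two packs in the same
/// pattern are instantiated with different lengths:
/// \code
/// template<typename ...Ts, typename ...Us>
/// void zip(std::pair<Ts, Us> ...ps); // error if Ts and Us differ in length
/// \endcode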
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
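/// For example (illustrative), with \c Ts bound to \c int, \c float, and
/// \c char, the expansion \c Ts... has 3 arguments.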
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
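/// For example (illustrative):
/// \code
/// template<typename ...Ts> struct X {
///   static constexpr unsigned size = sizeof...(Ts); // no expansion needed
/// };
/// \endcode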
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
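/// For example (illustrative), substituting \c int into \c auto* yields
/// \c int*.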
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Substitute a dependent \c auto type for the \c auto in \p TypeWithAuto.
QualType SubstAutoTypeDependent(QualType TypeWithAuto);
/// Substitute a dependent \c auto type for the \c auto in \p TypeWithAuto.
TypeSourceInfo *
SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
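/// For example (illustrative):
/// \code
/// template<typename T> struct Pair { Pair(T, T); };
/// Pair p(1, 2); // deduces Pair<int> via an implicit deduction guide
/// \endcode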
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are normalizing a constraint expression.
ConstraintNormalization,
/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
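/// Typical usage (illustrative sketch; \c SemaRef stands for the enclosing
/// \c Sema instance):
/// \code
/// ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, /*NewIndex=*/0);
/// // ... substitute the pattern using element 0 of each parameter pack ...
/// \endcode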
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
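///
/// Typical usage (illustrative sketch):
/// \code
/// InstantiatingTemplate Inst(SemaRef, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return; // depth limit exceeded; an error was already emitted
/// \endcode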
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-null, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
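/// For example, the operand of \c sizeof or \c decltype is such a context.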
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
bool isImmediateFunctionContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
for (const ExpressionEvaluationContextRecord &context :
llvm::reverse(ExprEvalContexts)) {
if (context.isImmediateFunctionContext())
return true;
if (context.isUnevaluated())
return false;
}
return false;
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
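///
/// Typical usage (illustrative sketch):
/// \code
/// SFINAETrap Trap(SemaRef);
/// // ... attempt a substitution that may fail ...
/// if (Trap.hasErrorOccurred())
///   return TDK_SubstitutionFailure;
/// \endcode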
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
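/// For example (illustrative), the call below enqueues an implicit
/// instantiation of twice<int>, which is performed later via
/// PerformPendingInstantiations():
/// \code
///   template <typename T> T twice(T v) { return v + v; }
///   int n = twice(2); // queues the definition of twice<int>
/// \endcode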
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
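/// A minimal usage sketch (illustrative; 'S' is an assumed Sema reference):
/// \code
///   {
///     GlobalEagerInstantiationScope Scope(S, /*Enabled=*/true);
///     // ... work that may queue vtables and implicit instantiations ...
///     Scope.perform(); // define used vtables, flush pending instantiations
///   } // the destructor restores the saved queues
/// \endcode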
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
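/// For example (illustrative), instantiating f<int> below must also
/// instantiate Local::g in the same local scope, because g uses the
/// function-local enumerator E:
/// \code
///   template <typename T> void f() {
///     enum { E = 1 };
///     struct Local { int g() { return E; } };
///     Local().g();
///   }
/// \endcode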
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
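/// A minimal usage sketch (illustrative; 'EPI' and 'NumParams' are assumed
/// to come from surrounding code building a FunctionProtoType, and the
/// withIsNoEscape modifier is used only as an example of an "interesting"
/// ExtParameterInfo):
/// \code
///   ExtParameterInfoBuilder InfoBuilder;
///   InfoBuilder.set(
///       1, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
///   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);
/// \endcode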
void PerformPendingInstantiations(bool LocalOnly = false);
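/// Perform substitution on the given type with the supplied template
/// arguments; e.g. (illustrative) substituting {T -> int} into
/// 'std::vector<T> &' yields 'std::vector<int> &'.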
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
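/// For example (illustrative): in '[super init]' the leading identifier
/// yields ObjCSuperMessage, in '[myObject description]' it yields
/// ObjCInstanceMessage, and in '[NSString string]' it yields
/// ObjCClassMessage.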
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - Called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
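// For instance (illustrative), after '#pragma clang fp reassociate(on)'
// CurFPFeatures.getAllowFPReassociate() is true, so isPreciseFPEnabled()
// returns false for the pragma's region.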
/// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
template <typename AttrType>
void addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr, Expr *ZDimExpr);
void AddWorkGroupSizeHintAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDim, Expr *YDim, Expr *ZDim);
WorkGroupSizeHintAttr *
MergeWorkGroupSizeHintAttr(Decl *D, const WorkGroupSizeHintAttr &A);
void AddIntelReqdSubGroupSize(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelReqdSubGroupSizeAttr *
MergeIntelReqdSubGroupSizeAttr(Decl *D, const IntelReqdSubGroupSizeAttr &A);
IntelNamedSubGroupSizeAttr *
MergeIntelNamedSubGroupSizeAttr(Decl *D, const IntelNamedSubGroupSizeAttr &A);
void AddSYCLIntelNumSimdWorkItemsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNumSimdWorkItemsAttr *
MergeSYCLIntelNumSimdWorkItemsAttr(Decl *D,
const SYCLIntelNumSimdWorkItemsAttr &A);
void AddSYCLIntelESimdVectorizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelESimdVectorizeAttr *
MergeSYCLIntelESimdVectorizeAttr(Decl *D,
const SYCLIntelESimdVectorizeAttr &A);
void AddSYCLIntelSchedulerTargetFmaxMhzAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelSchedulerTargetFmaxMhzAttr *MergeSYCLIntelSchedulerTargetFmaxMhzAttr(
Decl *D, const SYCLIntelSchedulerTargetFmaxMhzAttr &A);
void AddSYCLIntelNoGlobalWorkOffsetAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNoGlobalWorkOffsetAttr *MergeSYCLIntelNoGlobalWorkOffsetAttr(
Decl *D, const SYCLIntelNoGlobalWorkOffsetAttr &A);
void AddSYCLIntelLoopFuseAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelLoopFuseAttr *
MergeSYCLIntelLoopFuseAttr(Decl *D, const SYCLIntelLoopFuseAttr &A);
void AddIntelFPGAPrivateCopiesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGAMaxReplicatesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAMaxReplicatesAttr *
MergeIntelFPGAMaxReplicatesAttr(Decl *D, const IntelFPGAMaxReplicatesAttr &A);
void AddIntelFPGAForcePow2DepthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAForcePow2DepthAttr *
MergeIntelFPGAForcePow2DepthAttr(Decl *D,
const IntelFPGAForcePow2DepthAttr &A);
void AddSYCLIntelFPGAInitiationIntervalAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *MergeSYCLIntelFPGAInitiationIntervalAttr(
Decl *D, const SYCLIntelFPGAInitiationIntervalAttr &A);
SYCLIntelFPGAMaxConcurrencyAttr *MergeSYCLIntelFPGAMaxConcurrencyAttr(
Decl *D, const SYCLIntelFPGAMaxConcurrencyAttr &A);
void AddSYCLIntelMaxGlobalWorkDimAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelMaxGlobalWorkDimAttr *
MergeSYCLIntelMaxGlobalWorkDimAttr(Decl *D,
const SYCLIntelMaxGlobalWorkDimAttr &A);
void AddIntelFPGABankWidthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGABankWidthAttr *
MergeIntelFPGABankWidthAttr(Decl *D, const IntelFPGABankWidthAttr &A);
void AddIntelFPGANumBanksAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGANumBanksAttr *
MergeIntelFPGANumBanksAttr(Decl *D, const IntelFPGANumBanksAttr &A);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
/// AddSYCLIntelFPGAMaxConcurrencyAttr - Adds a max_concurrency attribute to a
/// particular declaration.
void AddSYCLIntelFPGAMaxConcurrencyAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
bool checkAllowedSYCLInitializer(VarDecl *VD);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
/// Look up 'coroutine_traits' in the std namespace and the std::experimental
/// namespace. The namespace found is recorded in Namespace.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc,
NamespaceDecl *&Namespace);
/// Check that the expression co_await promise.final_suspend() is not
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
struct DeclareTargetContextInfo {
struct MapInfo {
OMPDeclareTargetDeclAttr::MapTypeTy MT;
SourceLocation Loc;
};
/// Explicitly listed variables and functions in a 'to' or 'link' clause.
llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
/// The 'device_type' as parsed from the clause.
OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
/// The directive location.
SourceLocation Loc;
DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
: Kind(Kind), Loc(Loc) {}
};
/// Stack of nested '#pragma omp declare target' directive contexts.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
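/// For example (illustrative), parsing
/// \code
///   #pragma omp declare target to(x) device_type(nohost)
/// \endcode
/// records 'x' in ExplicitlyMapped with MT == MT_To and sets DT to
/// OMPDeclareTargetDeclAttr::DT_NoHost.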
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true,
bool SuppressExprDiags = false);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested inside the \p NumLoops loops.
/// \param OriginalInits [out] Collection of statements and declarations that
/// must have been executed/declared before entering the
/// loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
Stmt *&Body,
SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
&OriginalInits);
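/// For example (illustrative), for
/// \code
///   #pragma omp tile sizes(4, 4)
///   for (int i = 0; i < N; ++i)
///     for (int j = 0; j < M; ++j)
///       body(i, j);
/// \endcode
/// Kind is OMPD_tile, NumLoops is 2, and \p Body receives 'body(i, j);'.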
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
return OMPDeclareVariantScopes.empty() ? nullptr
: OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
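/// For example (illustrative), inside
/// \code
///   #pragma omp begin declare variant match(device = {arch(x86_64)})
///   int foo() { return 1; }
///   #pragma omp end declare variant
/// \endcode
/// 'foo' is registered as a variant whose NameSuffix mangling encodes the
/// context selector above.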
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside of an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Return true if we are currently inside an `omp begin/end declare
/// variant` scope, i.e. whether such a scope can be exited at the moment.
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on well-formed '\#pragma omp metadirective' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
// OpenMP directives and clauses.
/// Called on a correct id-expression from '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<std::string> Assumptions,
bool SkippedClauses);
/// Check if there is an active scoped `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the Requires directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called at the start of a declare-target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of a declare-target region, i.e. '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a target context is completed, which can be when a
/// '#pragma omp end declare target' was encountered or when a
/// '#pragma omp declare target' without declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on a correct id-expression from '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
/// Return true if inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactic loops (ForStmt or CXXForRangeStmt) associated with
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
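/// A canonical loop in this sense is, e.g. (illustrative):
/// \code
///   for (int i = 0; i < N; ++i)
///     ...
/// \endcode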
/// Process a canonical OpenMP loop nest that can either be a canonical
/// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
/// OpenMP loop transformation construct.
StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \param NumAppendArgs The number of omp_interop_t arguments to account for
/// in checking.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, unsigned NumAppendArgs,
SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
/// \param AdjustArgsNothing The list of 'nothing' arguments.
/// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
/// \param AppendArgs The list of 'append_args' arguments.
/// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
/// \param AppendArgsLoc The Location of an 'append_args' clause.
/// \param SR The SourceRange of the 'declare variant' directive.
void ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
ArrayRef<Expr *> AdjustArgsNothing,
ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
SourceRange SR);
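// Illustrative example (assumed usage): for a declaration such as
//   #pragma omp declare variant(gpu_saxpy) match(device={arch(nvptx)})
//   void saxpy(int n, float a, float *x, float *y);
// checkOpenMPDeclareVariantFunction verifies that 'gpu_saxpy' is compatible
// with 'saxpy' (accounting for any appended omp_interop_t arguments), and
// ActOnOpenMPDeclareVariantDirective then attaches the variant attribute.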
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'align' clause.
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
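// Illustrative example (assumed usage): a clause such as
//   schedule(monotonic: dynamic, 4)
// arrives with M1 = monotonic, Kind = dynamic, and ChunkSize = 4; the two
// modifier slots also allow forms like schedule(monotonic, simd: static).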
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
bool IsTarget, bool IsTargetSync,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *ActOnOpenMPMapClause(
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, bool NoDiagnose = false,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
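// Illustrative example (assumed usage, hypothetical names 'my_alloc', 'tr'):
//   #pragma omp target uses_allocators(omp_default_mem_alloc, my_alloc(tr))
// each list item yields one UsesAllocatorsData entry: 'my_alloc' with
// AllocatorTraits 'tr', and the predefined allocator with a null traits
// expression.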
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// Called on a well-formed 'bind' clause.
OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_PRValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
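// Illustrative example (C language rule, hypothetical names): given the
// unprototyped declaration 'int f();', a call 'f(someFloat, someChar)'
// promotes someFloat to double and someChar to int via
// DefaultArgumentPromotion before the call is built.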
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
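// Illustrative example (assumed diagnosis): for
//   int *p = /* ... */; unsigned *q = p;
// the check yields IncompatiblePointerSign, which DiagnoseAssignmentResult
// reports as the usual -Wpointer-sign warning rather than a hard error.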
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
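// Illustrative example (assumed usage): for a flag enum
//   enum __attribute__((flag_enum)) F { A = 1, B = 2, C = 4 };
// a value like A|C (5) is in the flag set; with AllowMask, complements such
// as ~(A|B|C) are also accepted as masks.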
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
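// Illustrative example (assumed usage): for 'f += i' with float f and int i,
// the caller passes the computation type (float) as CompoundType; for the
// simple assignment 'f = i' it passes a null QualType instead.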
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// Type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
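// Illustrative example (C++ language rule): with
//   struct Base {}; struct Derived : Base {};
// binding 'const Base &r = d;' to a Derived 'd' is Ref_Compatible (via a
// derived-to-base step), 'Base &r = cb;' to a 'const Base cb' is only
// Ref_Related (it would drop a qualifier), and 'int &r = d;' is
// Ref_Incompatible.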
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions, i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
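// Illustrative note (assumed behavior): a typical flagged pattern is
// assigning a block that strongly captures 'self' to a property of 'self',
// which creates an obvious self -> block -> self retain cycle.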
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
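// Illustrative example (assumed usage): for 'if constexpr (sizeof(T) == 4)',
// ActOnCondition runs with ConditionKind::ConstexprIf; the resulting
// ConditionResult reports getKnownValue() as true or false, so the untaken
// branch can be discarded during instantiation.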
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
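/// Illustrative use of ActOnCondition below (a sketch, not verbatim from the
/// parser):
///
///   Sema::ConditionResult Cond = Actions.ActOnCondition(
///       getCurScope(), IfLoc, CondExpr, Sema::ConditionKind::Boolean);
///   if (Cond.isInvalid())
///     return StmtError();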
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted condition expression, or an invalid result on error.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Performs the conversion to bool; the result is
/// invalid if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid ExprResult on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
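// Illustrative caller pattern for the VerifyIntegerConstantExpression
// overloads above (a sketch, not verbatim from any caller):
//
//   llvm::APSInt Value;
//   ExprResult Converted =
//       VerifyIntegerConstantExpression(SizeExpr, &Value, AllowFold);
//   if (Converted.isInvalid())
//     return true; // the appropriate diagnostic has already been emitted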
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid ExprResult on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
class DeviceDeferredDiagnostic {
public:
DeviceDeferredDiagnostic(SourceLocation SL, const PartialDiagnostic &PD,
DeviceDiagnosticReason R)
: Diagnostic(SL, PD), Reason(R) {}
PartialDiagnosticAt &getDiag() { return Diagnostic; }
DeviceDiagnosticReason getReason() const { return Reason; }
private:
PartialDiagnosticAt Diagnostic;
DeviceDiagnosticReason Reason;
};
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<DeviceDeferredDiagnostic>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
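/// Emit a diagnostic through the deferred-diagnostic machinery appropriate
/// for the current offload language (the CUDA or OpenMP helpers above), or
/// immediately when no deferral is needed.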
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the type is allowed to be used for the current target.
void checkTypeSupport(QualType Ty, SourceLocation Loc,
ValueDecl *D = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
enum CUDAVariableTarget {
CVT_Device,  ///< Emitted on device side with a shadow variable on host side
CVT_Host,    ///< Emitted on host side only
CVT_Both,    ///< Emitted on both sides with different addresses
CVT_Unified, ///< Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller the function which needs the address of \p Callee;
/// may be nullptr in the case of a global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas are host+device functions by default, unless they have an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p Var satisfy CUDA restrictions. In
/// case of error emits the appropriate diagnostic and invalidates \p Var.
///
/// \details CUDA allows only empty constructors as initializers for global
/// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are implicitly
/// static in CUDA). One exception is that CUDA allows constant initializers
/// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
enum class AttributeCompletion {
Attribute,
Scope,
None,
};
void CodeCompleteAttribute(
AttributeCommonInfo::Syntax Syntax,
AttributeCompletion Completion = AttributeCompletion::Attribute,
const IdentifierInfo *Scope = nullptr);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
void CheckSYCLKernelCall(FunctionDecl *CallerFunc, SourceRange CallLoc,
ArrayRef<const Expr *> Args);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinArithmeticFence(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
bool SemaBuiltinElementwiseMath(CallExpr *TheCall);
bool SemaBuiltinElementwiseMathOneArg(CallExpr *TheCall);
bool SemaBuiltinReduceMath(CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
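/// Returns true if \p D is the record behind the CFErrorRef pointer.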
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters it expects.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, treat it as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with
/// lower or equal alignment requirements. If so, it removes the expression
/// from the set. This is used when we do not want to diagnose such misaligned
/// access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics, e.g. in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL kernels here and handle them separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for the current compilation unit this
// Sema is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
std::unique_ptr<SYCLIntegrationFooter> SyclIntFooter;
// We need to store the list of sycl_kernel functions and their associated
// generated OpenCL kernels so we can go back and rename them after the fact.
llvm::SmallVector<std::pair<const FunctionDecl *, FunctionDecl *>>
SyclKernelsToOpenCLKernels;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during kernel emission also skips the
// useful notes that show where the kernel was called.
bool DiagnosingSYCLKernel = false;
public:
void addSyclOpenCLKernel(const FunctionDecl *SyclKernel,
FunctionDecl *OpenCLKernel) {
SyclKernelsToOpenCLKernels.emplace_back(SyclKernel, OpenCLKernel);
}
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(*this);
return *SyclIntHeader.get();
}
SYCLIntegrationFooter &getSyclIntegrationFooter() {
if (SyclIntFooter == nullptr)
SyclIntFooter = std::make_unique<SYCLIntegrationFooter>(*this);
return *SyclIntFooter.get();
}
void addSyclVarDecl(VarDecl *VD) {
if (LangOpts.SYCLIsDevice && !LangOpts.SYCLIntFooter.empty())
getSyclIntegrationFooter().addVarDecl(VD);
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction,
KernelConstStaticVariable
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void copySYCLKernelAttrs(const CXXRecordDecl *KernelObj);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void SetSYCLKernelNames();
void MarkDevices();
/// Get the number of fields or captures within the parsed type.
ExprResult ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given field number so that callers
/// can wrap it in a decltype() to get the actual type of the field.
ExprResult ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc,
QualType SourceTy, Expr *Idx);
/// Get the number of base classes within the parsed type.
ExprResult ActOnSYCLBuiltinNumBasesExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given base number so that callers
/// can wrap it in a decltype() to get the actual type of the base class.
ExprResult ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, QualType SourceTy,
Expr *Idx);
/// Emit a diagnostic about the given attribute having a deprecated name, and
/// also emit a fixit hint to generate the new attribute name.
void DiagnoseDeprecatedAttribute(const ParsedAttr &A, StringRef NewScope,
StringRef NewName);
/// Diagnoses an attribute in the 'intelfpga' namespace and suggests using
/// the attribute in the 'intel' namespace instead.
void CheckDeprecatedSYCLAttributeSpelling(const ParsedAttr &A,
StringRef NewName = "");
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't yet know whether this function will be
/// codegen'ed for the device, creates a diagnostic which is emitted if and
/// when we realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Diagnose __float128 type usage only from SYCL device code if the
/// // current target doesn't support it.
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(
SourceLocation Loc, unsigned DiagID,
DeviceDiagnosticReason Reason = DeviceDiagnosticReason::Sycl |
DeviceDiagnosticReason::Esimd);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't yet know whether its caller
/// will be codegen'ed.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred function calls that may not be
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc,
DeviceDiagnosticReason Reason);
/// Tells whether the given variable is a SYCL explicit SIMD extension
/// "private global" variable, i.e. a global variable in the private address
/// space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
return getLangOpts().SYCLIsDevice && VDecl->hasAttr<SYCLSimdAttr>() &&
VDecl->hasGlobalStorage() &&
(VDecl->getType().getAddressSpace() == LangAS::sycl_private);
}
};
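/// Checks that the given attribute argument is a non-negative, non-zero
/// integer constant expression (instantiation-dependent arguments are
/// accepted unchanged). Returns the folded expression on success, the
/// expression (after a warning) for negative values, and nullptr when the
/// argument is not an ICE or is zero.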
inline Expr *checkMaxWorkSizeAttrExpr(Sema &S, const AttributeCommonInfo &CI,
Expr *E) {
assert(E && "Attribute must have an argument.");
if (!E->isInstantiationDependent()) {
llvm::APSInt ArgVal;
ExprResult ICE = S.VerifyIntegerConstantExpression(E, &ArgVal);
if (ICE.isInvalid())
return nullptr;
E = ICE.get();
if (ArgVal.isNegative()) {
S.Diag(E->getExprLoc(),
diag::warn_attribute_requires_non_negative_integer_argument)
<< E->getType() << S.Context.UnsignedLongLongTy
<< E->getSourceRange();
return E;
}
unsigned Val = ArgVal.getZExtValue();
if (Val == 0) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_is_zero)
<< CI << E->getSourceRange();
return nullptr;
}
}
return E;
}
template <typename WorkGroupAttrType>
void Sema::addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr,
Expr *ZDimExpr) {
assert((XDimExpr && YDimExpr && ZDimExpr) &&
"argument has unexpected null value");
// Accept value-dependent arguments for now; they are checked once the
// template is instantiated.
if (!XDimExpr->isValueDependent() && !YDimExpr->isValueDependent() &&
!ZDimExpr->isValueDependent()) {
// Save ConstantExpr in semantic attribute
XDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, XDimExpr);
YDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, YDimExpr);
ZDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, ZDimExpr);
if (!XDimExpr || !YDimExpr || !ZDimExpr)
return;
}
D->addAttr(::new (Context)
WorkGroupAttrType(Context, CI, XDimExpr, YDimExpr, ZDimExpr));
}
/// RAII object that enters a new expression evaluation context.
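///
/// Illustrative usage (a sketch, not verbatim from any caller):
///
///   {
///     EnterExpressionEvaluationContext Unevaluated(
///         S, Sema::ExpressionEvaluationContext::Unevaluated);
///     // ... analyze expressions that must not be odr-used ...
///   } // the saved context is popped when the guard goes out of scope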
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
image_random-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file image_random-inl.h
 * \brief Image transformation operators such as ToTensor and Normalize.
 * \author
 */
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
// NOTE: Kernel launch/map was extremely costly.
// Hence, we use separate CUDA kernels for these operators.
template<typename DType, typename T1, typename T2>
void ToTensorImplCUDA(mshadow::Stream<gpu> *s,
const T1 input,
const T2 output,
const int req,
const float normalize_factor);
template<typename DType>
void NormalizeImplCUDA(mshadow::Stream<gpu> *s,
const DType *input,
DType *output,
const int req,
const int N,
const int C,
const int H,
const int W,
const float mean_d0,
const float mean_d1,
const float mean_d2,
const float std_d0,
const float std_d1,
const float std_d2);
template<typename DType>
void NormalizeBackwardImplCUDA(mshadow::Stream<gpu> *s,
const DType *out_grad,
DType *in_grad,
const int req,
const int N,
const int C,
const int H,
const int W,
const float std_d0,
const float std_d1,
const float std_d2);
#endif // MXNET_USE_CUDA
// Shape and Type inference for image to tensor operator
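// Converts (H, W, C) input to (C, H, W), or (N, H, W, C) to (N, C, H, W).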
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape &shp = (*in_attrs)[0];
if (!shape_is_known(shp)) return false;
CHECK((shp.ndim() == 3) || (shp.ndim() == 4))
<< "Input image must have shape (height, width, channels), or "
<< "(N, height, width, channels) but got " << shp;
if (shp.ndim() == 3) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({shp[2], shp[0], shp[1]}));
} else if (shp.ndim() == 4) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({shp[0], shp[3], shp[1], shp[2]}));
}
return true;
}
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
return (*in_attrs)[0] != -1;
}
// Operator Implementation
template<typename DType, int req>
inline void ToTensor(float* out_data, const DType* in_data,
const int length,
const int channels,
const float normalize_factor,
const int step) {
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
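// Transpose interleaved HWC input into planar CHW output, scaling each
// value by 1 / normalize_factor (callers pass 255 to map 8-bit pixel
// values into [0, 1]).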
for (int c = 0; c < channels; ++c) {
for (int i = 0; i < length; ++i) {
KERNEL_ASSIGN(out_data[step + c*length + i], req,
(in_data[step + i*channels + c]) / normalize_factor);
}
}
}
inline void ToTensorImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channel,
const float normalize_factor,
const int step) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
float* output = outputs[0].dptr<float>();
DType* input = inputs[0].dptr<DType>();
ToTensor<DType, req_type>(output, input, length, channel,
normalize_factor, step);
});
});
}
template<typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
// We do not use a temp buffer when performing the operation.
// Hence, this check is necessary.
CHECK_EQ(req[0], kWriteTo)
<< "`to_tensor` does not support inplace updates";
const float normalize_factor = 255.0f;
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
if (inputs[0].ndim() == 3) {
Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
Tensor<gpu, 3, float> output = outputs[0].get<gpu, 3, float>(s);
ToTensorImplCUDA<DType, Tensor<gpu, 3, DType>, Tensor<gpu, 3, float>>
(s, input, output, req_type, normalize_factor);
} else {
Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
Tensor<gpu, 4, float> output = outputs[0].get<gpu, 4, float>(s);
ToTensorImplCUDA<DType, Tensor<gpu, 4, DType>, Tensor<gpu, 4, float>>
(s, input, output, req_type, normalize_factor);
}
});
});
#else
LOG(FATAL) << "Compile with USE_CUDA=1 to use ToTensor operator on GPU.";
#endif // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
// 3D Input - (h, w, c)
const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
const int channel = static_cast<int>(inputs[0].shape_[2]);
const int step = 0;
ToTensorImpl(inputs, outputs, req, length,
channel, normalize_factor, step);
} else if (inputs[0].ndim() == 4) {
// 4D input (n, h, w, c)
const int batch_size = inputs[0].shape_[0];
const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
const int channel = static_cast<int>(inputs[0].shape_[3]);
const int step = channel * length;
#pragma omp parallel for
for (auto n = 0; n < batch_size; ++n) {
ToTensorImpl(inputs, outputs, req, length, channel,
normalize_factor, n*step);
}
}
}
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
mxnet::Tuple<float> mean;
mxnet::Tuple<float> std;
DMLC_DECLARE_PARAMETER(NormalizeParam) {
DMLC_DECLARE_FIELD(mean)
.set_default(mxnet::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f})
.describe("Sequence of means for each channel. "
"Default value is 0.");
DMLC_DECLARE_FIELD(std)
.set_default(mxnet::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f})
.describe("Sequence of standard deviations for each channel. "
"Default value is 1.");
}
};
// Shape and Type inference for image Normalize operator
// Shape inference
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const NormalizeParam ¶m = nnvm::get<NormalizeParam>(attrs.parsed);
const auto& dshape = (*in_attrs)[0];
if (!dshape.ndim()) return false;
CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
<< "Input tensor must have shape (channels, height, width), or "
<< "(N, channels, height, width), but got " << dshape;
int nchannels = 0;
if (dshape.ndim() == 3) {
nchannels = dshape[0];
CHECK(nchannels == 3 || nchannels == 1)
<< "The first dimension of input tensor must be the channel dimension with "
<< "either 1 or 3 elements, but got input with shape " << dshape;
} else if (dshape.ndim() == 4) {
nchannels = dshape[1];
CHECK(nchannels == 3 || nchannels == 1)
<< "The second dimension of input tensor must be the channel dimension with "
<< "either 1 or 3 elements, but got input with shape " << dshape;
}
CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
<< "Invalid mean for input with shape " << dshape
<< ". mean must have either 1 or " << nchannels
<< " elements, but got " << param.mean;
CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
<< "Invalid std for input with shape " << dshape
<< ". std must have either 1 or " << nchannels
<< " elements, but got " << param.std;
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
// Type Inference
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
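// Normalize kernel: for each channel c,
// out[c*length + i] = (in[c*length + i] - mean[c]) / std[c].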
template<typename DType, int req>
inline void Normalize(DType* out_data,
const DType* in_data,
const int length,
const int channels,
const int step,
const std::vector<float> mean,
const std::vector<float> std) {
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
for (int c = 0; c < channels; ++c) {
for (int i = 0; i < length; ++i) {
KERNEL_ASSIGN(out_data[step + c*length + i], req,
(in_data[step + c*length + i] - mean[c]) / std[c]);
}
}
}
inline void NormalizeImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channels,
const int step,
const std::vector<float> mean,
const std::vector<float> std) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
DType* input = inputs[0].dptr<DType>();
DType* output = outputs[0].dptr<DType>();
Normalize<DType, req_type>(output, input, length, channels, step,
mean, std);
});
});
}
template<typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const NormalizeParam ¶m = nnvm::get<NormalizeParam>(attrs.parsed);
// Mean and Std can be 1 or 3D only.
std::vector<float> mean(3);
std::vector<float> std(3);
if (param.mean.ndim() == 1) {
mean[0] = mean[1] = mean[2] = param.mean[0];
} else {
mean[0] = param.mean[0];
mean[1] = param.mean[1];
mean[2] = param.mean[2];
}
if (param.std.ndim() == 1) {
std[0] = std[1] = std[2] = param.std[0];
} else {
std[0] = param.std[0];
std[1] = param.std[1];
std[2] = param.std[2];
}
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
int N, C, H, W;
DType *input = nullptr;
DType *output = nullptr;
if (inputs[0].ndim() == 3) {
N = 1;
C = static_cast<int>(inputs[0].shape_[0]);
H = static_cast<int>(inputs[0].shape_[1]);
W = static_cast<int>(inputs[0].shape_[2]);
input = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
output = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
} else {
N = static_cast<int>(inputs[0].shape_[0]);
C = static_cast<int>(inputs[0].shape_[1]);
H = static_cast<int>(inputs[0].shape_[2]);
W = static_cast<int>(inputs[0].shape_[3]);
input = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
output = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
}
NormalizeImplCUDA<DType>(s, input, output, req_type,
N, C, H, W,
mean[0], mean[1], mean[2],
std[0], std[1], std[2]);
});
});
#else
LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize operator on GPU.";
#endif // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
// 3D input (c, h, w)
const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
const int channel = static_cast<int>(inputs[0].shape_[0]);
const int step = 0;
NormalizeImpl(inputs, outputs, req, length, channel, step, mean, std);
} else if (inputs[0].ndim() == 4) {
// 4D input (n, c, h, w)
const int batch_size = inputs[0].shape_[0];
const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
const int channel = static_cast<int>(inputs[0].shape_[1]);
const int step = channel * length;
#pragma omp parallel for
for (auto n = 0; n < batch_size; ++n) {
NormalizeImpl(inputs, outputs, req, length, channel, n*step, mean, std);
}
}
}
// Backward function
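// Since d/dx[(x - mean[c]) / std[c]] = 1 / std[c], the backward pass simply
// scales the incoming gradient by 1 / std[c].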
template<typename DType, int req>
inline void NormalizeBackward(const DType* out_grad,
DType* in_grad,
const int length,
const int channels,
const int step,
const std::vector<float> std) {
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
for (int c = 0; c < channels; ++c) {
for (int i = 0; i < length; ++i) {
KERNEL_ASSIGN(in_grad[step + c*length + i], req,
out_grad[step + c*length + i] * (1.0 / std[c]));
}
}
}
inline void NormalizeBackwardImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channels,
const int step,
const std::vector<float> std
) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
DType* out_grad = inputs[0].dptr<DType>();
DType* in_grad = outputs[0].dptr<DType>();
NormalizeBackward<DType, req_type>(out_grad, in_grad, length,
channels, step, std);
});
});
}
template<typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const NormalizeParam ¶m = nnvm::get<NormalizeParam>(attrs.parsed);
// Std can be 1 or 3D only.
std::vector<float> std(3);
if (param.std.ndim() == 1) {
std[0] = std[1] = std[2] = param.std[0];
} else {
std[0] = param.std[0];
std[1] = param.std[1];
std[2] = param.std[2];
}
// Note: inputs[0] is out_grad
const TBlob& in_data = inputs[1];
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
int N, C, H, W;
DType *in_grad = nullptr;
DType *out_grad = nullptr;
if (in_data.ndim() == 3) {
N = 1;
C = static_cast<int>(in_data.shape_[0]);
H = static_cast<int>(in_data.shape_[1]);
W = static_cast<int>(in_data.shape_[2]);
out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
} else {
N = static_cast<int>(in_data.shape_[0]);
C = static_cast<int>(in_data.shape_[1]);
H = static_cast<int>(in_data.shape_[2]);
W = static_cast<int>(in_data.shape_[3]);
out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
}
NormalizeBackwardImplCUDA<DType>(s, out_grad, in_grad, req_type,
N, C, H, W,
std[0], std[1], std[2]);
});
});
#else
LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize backward operator on GPU.";
#endif // MXNET_USE_CUDA
} else if (in_data.ndim() == 3) {
// 3D input (c, h, w)
const int length = in_data.shape_[1] * in_data.shape_[2];
const int channel = static_cast<int>(in_data.shape_[0]);
const int step = 0;
NormalizeBackwardImpl(inputs, outputs, req, length, channel, step, std);
} else if (in_data.ndim() == 4) {
// 4D input (n, c, h, w)
const int batch_size = in_data.shape_[0];
const int length = in_data.shape_[2] * in_data.shape_[3];
const int channel = static_cast<int>(in_data.shape_[1]);
const int step = channel * length;
#pragma omp parallel for
for (auto n = 0; n < batch_size; ++n) {
NormalizeBackwardImpl(inputs, outputs, req, length, channel, n*step, std);
}
}
}
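// saturate_cast converts a float to DType; the uint8_t specialization
// clamps to [0, 255] first so out-of-range values cannot wrap around.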
template<typename DType>
inline DType saturate_cast(const float& src) {
return static_cast<DType>(src);
}
template<>
inline uint8_t saturate_cast(const float& src) {
return std::min(std::max(src, 0.f), 255.f);
}
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
mxnet::TShape& dshape = (*in_attrs)[0];
CHECK_EQ(dshape.ndim(), 3)
<< "Input image must have shape (height, width, channels), but got " << dshape;
auto nchannels = dshape[dshape.ndim()-1];
CHECK(nchannels == 3 || nchannels == 1)
<< "The last dimension of input image must be the channel dimension with "
<< "either 1 or 3 elements, but got input with shape " << dshape;
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
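// FlipImpl reverses the tensor along `axis` by factoring the layout into
// (head, mid, tail): head collapses the leading dimensions, mid is the
// flipped extent, and tail the trailing ones. Pairs (j, mid-1-j) are
// swapped through a temporary, so src and dst may alias for in-place flips.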
template<typename DType, int axis>
void FlipImpl(const mxnet::TShape &shape, DType *src, DType *dst) {
int head = 1, mid = shape[axis], tail = 1;
for (int i = 0; i < axis; ++i) head *= shape[i];
for (int i = axis+1; i < shape.ndim(); ++i) tail *= shape[i];
for (int i = 0; i < head; ++i) {
for (int j = 0; j < (mid >> 1); ++j) {
int idx1 = (i*mid + j) * tail;
int idx2 = idx1 + (mid-(j << 1)-1) * tail;
for (int k = 0; k < tail; ++k, ++idx1, ++idx2) {
DType tmp = src[idx1];
dst[idx1] = src[idx2];
dst[idx2] = tmp;
}
}
}
}
inline void FlipLeftRight(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
outputs[0].dptr<DType>());
});
}
inline void FlipTopBottom(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
outputs[0].dptr<DType>());
});
}
struct RandomFlipParam : public dmlc::Parameter<RandomFlipParam> {
float p;
DMLC_DECLARE_PARAMETER(RandomFlipParam) {
DMLC_DECLARE_FIELD(p)
.set_default(0.5f)
.describe("The probablity of flipping the image.");
}
};
inline void RandomFlipLeftRight(
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomFlipParam ¶m = nnvm::get<RandomFlipParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  // Draw from U(0, 1) so the flip below fires with probability exactly `p`;
  // a normal draw would not match the documented probability semantics.
  std::uniform_real_distribution<float> dist(0, 1);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
if (dist(prnd->GetRndEngine()) > param.p) {
if (outputs[0].dptr_ != inputs[0].dptr_) {
std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
}
} else {
FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
outputs[0].dptr<DType>());
}
});
}
inline void RandomFlipTopBottom(
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomFlipParam ¶m = nnvm::get<RandomFlipParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  // Draw from U(0, 1) so the flip below fires with probability exactly `p`;
  // a normal draw would not match the documented probability semantics.
  std::uniform_real_distribution<float> dist(0, 1);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
if (dist(prnd->GetRndEngine()) > param.p) {
if (outputs[0].dptr_ != inputs[0].dptr_) {
std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
}
} else {
FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
outputs[0].dptr<DType>());
}
});
}
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
float min_factor;
float max_factor;
DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
DMLC_DECLARE_FIELD(min_factor)
.set_lower_bound(0.0)
.describe("Minimum factor.");
DMLC_DECLARE_FIELD(max_factor)
.set_lower_bound(0.0)
.describe("Maximum factor.");
}
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
inline void RandomBrightness(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomEnhanceParam ¶m = nnvm::get<RandomEnhanceParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
float alpha_b = std::uniform_real_distribution<float>(
param.min_factor, param.max_factor)(prnd->GetRndEngine());
AdjustBrightnessImpl(alpha_b, ctx, inputs, req, outputs);
}
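// Contrast adjustment blends each pixel toward the image's mean gray level:
// out = alpha_c * in + (1 - alpha_c) * gray_mean, where gray is computed
// with the BT.601 luma weights (0.299, 0.587, 0.114).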
inline void AdjustContrastImpl(const float& alpha_c,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
static const float coef[] = { 0.299f, 0.587f, 0.114f };
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int nchannels = inputs[0].shape_[2];
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
float sum = 0.f;
if (nchannels > 1) {
for (int l = 0; l < length; ++l) {
for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c];
}
} else {
for (int l = 0; l < length; ++l) sum += input[l];
}
float gray_mean = sum / static_cast<float>(length);
float beta = (1 - alpha_c) * gray_mean;
for (int l = 0; l < length * nchannels; ++l) {
float val = input[l] * alpha_c + beta;
output[l] = saturate_cast<DType>(val);
}
});
}
inline void RandomContrast(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomEnhanceParam ¶m = nnvm::get<RandomEnhanceParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
float alpha_c = std::uniform_real_distribution<float>(
param.min_factor, param.max_factor)(prnd->GetRndEngine());
AdjustContrastImpl(alpha_c, ctx, inputs, req, outputs);
}
inline void AdjustSaturationImpl(const float& alpha_s,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
static const float coef[] = { 0.299f, 0.587f, 0.114f };
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int nchannels = inputs[0].shape_[2];
float alpha_o = 1.f - alpha_s;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
if (nchannels == 1) {
for (int l = 0; l < length; ++l) output[l] = input[l];
return;
}
for (int l = 0; l < length; ++l) {
float gray = 0.f;
for (int c = 0; c < 3; ++c) {
        gray += input[l*3 + c] * coef[c];
}
gray *= alpha_o;
for (int c = 0; c < 3; ++c) {
float val = gray + input[l*3 + c] * alpha_s;
output[l*3 + c] = saturate_cast<DType>(val);
}
}
});
}
inline void RandomSaturation(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomEnhanceParam ¶m = nnvm::get<RandomEnhanceParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
float alpha_s = std::uniform_real_distribution<float>(
param.min_factor, param.max_factor)(prnd->GetRndEngine());
AdjustSaturationImpl(alpha_s, ctx, inputs, req, outputs);
}
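// RGB -> HLS on [0, 255] inputs. The (condition) * value sums below are a
// branchless formulation: each comparison yields 0 or 1, so exactly one
// alternative contributes to s and to h.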
inline void RGB2HLSConvert(const float& src_r,
const float& src_g,
const float& src_b,
float *dst_h,
float *dst_l,
float *dst_s) {
float b = src_b / 255.f, g = src_g / 255.f, r = src_r / 255.f;
float h = 0.f, s = 0.f, l;
float vmin;
float vmax;
float diff;
vmax = vmin = r;
vmax = std::fmax(vmax, g);
vmax = std::fmax(vmax, b);
vmin = std::fmin(vmin, g);
vmin = std::fmin(vmin, b);
diff = vmax - vmin;
l = (vmax + vmin) * 0.5f;
if (diff > std::numeric_limits<float>::epsilon()) {
s = (l < 0.5f) * diff / (vmax + vmin);
s += (l >= 0.5f) * diff / (2.0f - vmax - vmin);
diff = 60.f / diff;
h = (vmax == r) * (g - b) * diff;
h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f);
h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f);
h += (h < 0.f) * 360.f;
}
*dst_h = h;
*dst_l = l;
*dst_s = s;
}
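// HLS -> RGB: hue is reduced to one of six 60-degree sectors, and
// c_HlsSectorData selects which of tab[] = {p2, p1, falling ramp, rising
// ramp} feeds each of the B, G, and R outputs.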
inline void HLS2RGBConvert(const float& src_h,
const float& src_l,
const float& src_s,
float *dst_r,
float *dst_g,
float *dst_b) {
static const int c_HlsSectorData[6][3] = {
{ 1, 3, 0 },
{ 1, 0, 2 },
{ 3, 0, 1 },
{ 0, 2, 1 },
{ 0, 1, 3 },
{ 2, 1, 0 }
};
float h = src_h, l = src_l, s = src_s;
float b = l, g = l, r = l;
if (s != 0) {
float p2 = (l <= 0.5f) * l * (1 + s);
p2 += (l > 0.5f) * (l + s - l * s);
float p1 = 2 * l - p2;
h *= 1.f / 60.f;
if (h < 0) {
do { h += 6; } while (h < 0);
} else if (h >= 6) {
do { h -= 6; } while (h >= 6);
}
int sector = static_cast<int>(h);
h -= sector;
float tab[4];
tab[0] = p2;
tab[1] = p1;
tab[2] = p1 + (p2 - p1) * (1 - h);
tab[3] = p1 + (p2 - p1) * h;
b = tab[c_HlsSectorData[sector][0]];
g = tab[c_HlsSectorData[sector][1]];
r = tab[c_HlsSectorData[sector][2]];
}
*dst_b = b * 255.f;
*dst_g = g * 255.f;
*dst_r = r * 255.f;
}
inline void AdjustHueImpl(float alpha,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
if (inputs[0].shape_[2] == 1) return;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* input = inputs[0].dptr<DType>();
DType* output = outputs[0].dptr<DType>();
for (int i = 0; i < length; ++i) {
float h, l, s;
float r = static_cast<float>(*(input++));
float g = static_cast<float>(*(input++));
float b = static_cast<float>(*(input++));
RGB2HLSConvert(r, g, b, &h, &l, &s);
h += alpha * 360.f;
HLS2RGBConvert(h, l, s, &r, &g, &b);
*(output++) = saturate_cast<DType>(r);
*(output++) = saturate_cast<DType>(g);
*(output++) = saturate_cast<DType>(b);
}
});
}
inline void RandomHue(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomEnhanceParam ¶m = nnvm::get<RandomEnhanceParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
float alpha = std::uniform_real_distribution<float>(
param.min_factor, param.max_factor)(prnd->GetRndEngine());
AdjustHueImpl(alpha, ctx, inputs, req, outputs);
}
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
float brightness;
float contrast;
float saturation;
float hue;
DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
DMLC_DECLARE_FIELD(brightness)
.describe("How much to jitter brightness.");
DMLC_DECLARE_FIELD(contrast)
.describe("How much to jitter contrast.");
DMLC_DECLARE_FIELD(saturation)
.describe("How much to jitter saturation.");
DMLC_DECLARE_FIELD(hue)
.describe("How much to jitter hue.");
}
};
inline void RandomColorJitter(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomColorJitterParam ¶m = nnvm::get<RandomColorJitterParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
int order[4] = {0, 1, 2, 3};
std::shuffle(order, order + 4, prnd->GetRndEngine());
bool flag = false;
for (int i = 0; i < 4; ++i) {
switch (order[i]) {
case 0:
if (param.brightness > 0) {
        float alpha_b = 1.0f + std::uniform_real_distribution<float>(
-param.brightness, param.brightness)(prnd->GetRndEngine());
AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 1:
if (param.contrast > 0) {
        float alpha_c = 1.0f + std::uniform_real_distribution<float>(
-param.contrast, param.contrast)(prnd->GetRndEngine());
AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 2:
if (param.saturation > 0) {
float alpha_s = 1.f + std::uniform_real_distribution<float>(
-param.saturation, param.saturation)(prnd->GetRndEngine());
AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 3:
if (param.hue > 0) {
float alpha_h = std::uniform_real_distribution<float>(
-param.hue, param.hue)(prnd->GetRndEngine());
AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
}
}
}
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
mxnet::Tuple<float> alpha;
DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
DMLC_DECLARE_FIELD(alpha)
.describe("The lighting alphas for the R, G, B channels.");
}
};
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
float alpha_std;
DMLC_DECLARE_PARAMETER(RandomLightingParam) {
DMLC_DECLARE_FIELD(alpha_std)
.set_default(0.05)
.describe("Level of the lighting noise.");
}
};
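// AlexNet-style PCA lighting: `eig` holds eigenvalue-scaled eigenvectors of
// an RGB covariance matrix; the per-channel alphas combine them into a
// single RGB offset that is added to every pixel.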
inline void AdjustLightingImpl(const mxnet::Tuple<float>& alpha,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
static const float eig[3][3] = {
{ 55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009 },
{ 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 },
{ 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 }
};
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int channels = inputs[0].shape_[2];
if (channels == 1) return;
float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int i = 0; i < length; i++) {
int base_ind = 3 * i;
float in_r = static_cast<float>(input[base_ind]);
float in_g = static_cast<float>(input[base_ind + 1]);
float in_b = static_cast<float>(input[base_ind + 2]);
output[base_ind] = saturate_cast<DType>(in_r + pca_r);
output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g);
output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b);
}
});
}
inline void AdjustLighting(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const AdjustLightingParam ¶m = nnvm::get<AdjustLightingParam>(attrs.parsed);
AdjustLightingImpl(param.alpha, ctx, inputs, req, outputs);
}
inline void RandomLighting(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
const RandomLightingParam ¶m = nnvm::get<RandomLightingParam>(attrs.parsed);
Stream<cpu> *s = ctx.get_stream<cpu>();
Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
std::normal_distribution<float> dist(0, param.alpha_std);
float alpha_r = dist(prnd->GetRndEngine());
float alpha_g = dist(prnd->GetRndEngine());
float alpha_b = dist(prnd->GetRndEngine());
AdjustLightingImpl({alpha_r, alpha_g, alpha_b}, ctx, inputs, req, outputs);
}
#define MXNET_REGISTER_IMAGE_AUG_OP(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ImageShape) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \
.add_argument("data", "NDArray-or-Symbol", "The input.")
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \
MXNET_REGISTER_IMAGE_AUG_OP(name) \
.set_attr<FResourceRequest>("FResourceRequest", \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \
})
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
pr88203-3.c | /* PR c++/88203 */
/* { dg-do compile } */
/* { dg-additional-options "-std=c99" { target c } } */
/* { dg-additional-options "-std=c++11" { target c++ } } */
void foo (const char *);
#pragma omp declare target to (foo)
void
f1 (void)
{
#pragma omp parallel for lastprivate (__func__) /* { dg-error "'__func__' is predetermined 'shared' for 'lastprivate'" } */
for (int i = 0; i < 2; i++)
foo (__func__);
#pragma omp parallel private (__func__) /* { dg-error "'__func__' is predetermined 'shared' for 'private'" } */
foo (__func__);
}
void
f2 (void)
{
foo (__func__);
#pragma omp parallel default(none) private (__func__) /* { dg-error "'__func__' is predetermined 'shared' for 'private'" } */
foo (__func__);
#pragma omp parallel for default(none) lastprivate (__func__) /* { dg-error "'__func__' is predetermined 'shared' for 'lastprivate'" } */
for (int i = 0; i < 2; i++)
foo (__func__);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
  /* Arbitrary fallback defaults; normally overridden from argv below. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 16;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
  // serial execution - 6 additions and 2 multiplications per stencil point
#pragma scop
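  // 7-point Jacobi update: each interior point becomes alpha times its own
  // value plus beta times the sum of its six face neighbors; the two time
  // planes A[0]/A[1] alternate via the t%2 / (t+1)%2 indices.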
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
  min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
  // Freeing the allocated arrays is left commented out (it caused performance degradation):
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
simde-diagnostic.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
*/
/* SIMDe targets a very wide range of standards and compilers, and our
* goal is to compile cleanly even with extremely aggressive warnings
* (i.e., -Weverything in clang, -Wextra in GCC, /W4 for MSVC, etc.)
* treated as errors.
*
* While our preference is to resolve the underlying issue a given
* diagnostic is warning us about, sometimes that's not possible.
* Fixing a warning in one compiler may cause problems in another.
* Sometimes a warning doesn't really apply to us (false positives),
* and sometimes adhering to a warning would mean dropping a feature
* we *know* the compiler supports since we have tested specifically
* for the compiler or feature.
*
* When practical, warnings are only disabled for specific code. For
* a list of warnings which are enabled by default in all SIMDe code,
* see SIMDE_DISABLE_UNWANTED_DIAGNOSTICS. Note that we restore the
* warning stack when SIMDe is done parsing, so code which includes
* SIMDe is not deprived of these warnings.
*/
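/* A minimal sketch of the push/disable/pop pattern these macros are meant
 * for (illustrative only; simde_example_nonzero is a made-up function):
 *
 *   HEDLEY_DIAGNOSTIC_PUSH
 *   SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_
 *   static int simde_example_nonzero (float v) { return v != 0.0f; }
 *   HEDLEY_DIAGNOSTIC_POP
 */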
#if !defined(SIMDE_DIAGNOSTIC_H)
#define SIMDE_DIAGNOSTIC_H
#include "hedley.h"
#include "simde-detect-clang.h"
/* This is only to help us implement functions like _mm_undefined_ps. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
#undef SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
#if HEDLEY_HAS_WARNING("-Wuninitialized")
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,2,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("GCC diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_PGI_VERSION_CHECK(19,10,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 549")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE,unassigned)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,12,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,unassigned)")
#elif \
HEDLEY_TI_VERSION_CHECK(16,9,9) || \
HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 551")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("warning(disable:592)")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) && !defined(__MSVC_RUNTIME_CHECKS)
#define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ __pragma(warning(disable:4700))
#endif
/* GCC emits a lot of "notes" about the ABI being different for things
* in newer versions of GCC. We don't really care because all our
* functions are inlined and don't generate ABI. */
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_PSABI_ _Pragma("GCC diagnostic ignored \"-Wpsabi\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PSABI_
#endif
/* Since MMX uses x87 FP registers, you're supposed to call _mm_empty()
* after each MMX function before any floating point instructions.
* Some compilers warn about functions which use MMX functions but
 * don't call _mm_empty(). However, since SIMDe is implementing the
* MMX API we shouldn't be calling _mm_empty(); we leave it to the
* caller to invoke simde_mm_empty(). */
#if HEDLEY_INTEL_VERSION_CHECK(19,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ _Pragma("warning(disable:13200 13203)")
#elif defined(HEDLEY_MSVC_VERSION)
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ __pragma(warning(disable:4799))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_
#endif
/* Intel is pushing people to use OpenMP SIMD instead of Cilk+, so they
* emit a diagnostic if you use #pragma simd instead of
* #pragma omp simd. SIMDe supports OpenMP SIMD, you just need to
* compile with -qopenmp or -qopenmp-simd and define
* SIMDE_ENABLE_OPENMP. Cilk+ is just a fallback. */
#if HEDLEY_INTEL_VERSION_CHECK(18,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ _Pragma("warning(disable:3948)")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_
#endif
/* MSVC emits a diagnostic when we call a function (like
* simde_mm_set_epi32) while initializing a struct. We currently do
* this a *lot* in the tests. */
#if \
defined(HEDLEY_MSVC_VERSION)
#define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ __pragma(warning(disable:4204))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_
#endif
/* This warning needs a lot of work. It is triggered if all you do is
* pass the value to memcpy/__builtin_memcpy, or if you initialize a
* member of the union, even if that member takes up the entire union.
* Last tested with clang-10, hopefully things will improve in the
* future; if clang fixes this I'd love to enable it. */
#if \
HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
#define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wconditional-uninitialized\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_
#endif
/* This warning is meant to catch things like `0.3 + 0.4 == 0.7`, which
 * is false. However, SIMDe uses these operations exclusively
* for things like _mm_cmpeq_ps, for which we really do want to check
* for equality (or inequality).
*
* If someone wants to put together a SIMDE_FLOAT_EQUAL(a, op, b) macro
 * which just wraps a check in some code to disable this diagnostic I'd
* be happy to accept it. */
#if \
HEDLEY_HAS_WARNING("-Wfloat-equal") || \
HEDLEY_GCC_VERSION_CHECK(3,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_
#endif
/* This is because we use HEDLEY_STATIC_ASSERT for static assertions.
* If Hedley can't find an implementation it will preprocess to
* nothing, which means there will be a trailing semi-colon. */
#if HEDLEY_HAS_WARNING("-Wextra-semi")
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("clang diagnostic ignored \"-Wextra-semi\"")
#elif HEDLEY_GCC_VERSION_CHECK(8,1,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("GCC diagnostic ignored \"-Wextra-semi\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_
#endif
/* We do use a few variadic macros, which technically aren't available
* until C99 and C++11, but every compiler I'm aware of has supported
* them for much longer. That said, usage is isolated to the test
* suite and compilers known to support them. */
#if HEDLEY_HAS_WARNING("-Wvariadic-macros") || HEDLEY_GCC_VERSION_CHECK(4,0,0)
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ \
_Pragma("clang diagnostic ignored \"-Wvariadic-macros\"") \
_Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ _Pragma("GCC diagnostic ignored \"-Wvariadic-macros\"")
#endif
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_
#endif
/* emscripten requires us to use a __wasm_unimplemented_simd128__ macro
* before we can access certain SIMD intrinsics, but this diagnostic
* warns about it being a reserved name. It is a reserved name, but
* it's reserved for the compiler and we are using it to convey
* information to the compiler. */
#if HEDLEY_HAS_WARNING("-Wdouble-promotion")
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_
#endif
/* clang 3.8 warns about the packed attribute being unnecessary when
* used in the _mm_loadu_* functions. That *may* be true for version
* 3.8, but for later versions it is crucial in order to make unaligned
* access safe. */
#if HEDLEY_HAS_WARNING("-Wpacked")
#define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ _Pragma("clang diagnostic ignored \"-Wpacked\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PACKED_
#endif
/* Triggered when assigning a float to a double implicitly. We use
* explicit casts in SIMDe, this is only used in the test suite. */
#if HEDLEY_HAS_WARNING("-Wdouble-promotion")
#define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ _Pragma("clang diagnostic ignored \"-Wdouble-promotion\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_
#endif
/* Several compilers treat conformant array parameters as VLAs. We
* test to make sure we're in C mode (C++ doesn't support CAPs), and
* that the version of the standard supports CAPs. We also reject
* some buggy compilers like MSVC (the logic is in Hedley if you want
* to take a look), but with certain warnings enabled some compilers
* still like to emit a diagnostic. */
#if HEDLEY_HAS_WARNING("-Wvla")
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("clang diagnostic ignored \"-Wvla\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("GCC diagnostic ignored \"-Wvla\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VLA_
#endif
#if HEDLEY_HAS_WARNING("-Wused-but-marked-unused")
#define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_
#endif
#if HEDLEY_HAS_WARNING("-Wunused-function")
#define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ _Pragma("clang diagnostic ignored \"-Wunused-function\"")
#elif HEDLEY_GCC_VERSION_CHECK(3,4,0)
#define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ _Pragma("GCC diagnostic ignored \"-Wunused-function\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
#define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ __pragma(warning(disable:4505))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_
#endif
#if HEDLEY_HAS_WARNING("-Wpass-failed")
#define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ _Pragma("clang diagnostic ignored \"-Wpass-failed\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_
#endif
#if HEDLEY_HAS_WARNING("-Wpadded")
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ _Pragma("clang diagnostic ignored \"-Wpadded\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ __pragma(warning(disable:4324))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PADDED_
#endif
#if HEDLEY_HAS_WARNING("-Wzero-as-null-pointer-constant")
#define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_ _Pragma("clang diagnostic ignored \"-Wzero-as-null-pointer-constant\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_
#endif
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_
#endif
#if HEDLEY_HAS_WARNING("-Wcast-function-type") || HEDLEY_GCC_VERSION_CHECK(8,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_ _Pragma("GCC diagnostic ignored \"-Wcast-function-type\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_
#endif
/* clang will emit this warning when we use C99 extensions when not in
* C99 mode, even though it does support this. In such cases we check
* the compiler and version first, so we know it's not a problem. */
#if HEDLEY_HAS_WARNING("-Wc99-extensions")
#define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc99-extensions\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_
#endif
/* https://github.com/simd-everywhere/simde/issues/277 */
#if defined(HEDLEY_GCC_VERSION) && HEDLEY_GCC_VERSION_CHECK(4,6,0) && !HEDLEY_GCC_VERSION_CHECK(6,4,0) && defined(__cplusplus)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ _Pragma("GCC diagnostic ignored \"-Wunused-but-set-variable\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_
#endif
/* This is the warning that you normally define _CRT_SECURE_NO_WARNINGS
* to silence, but you have to do that before including anything and
* that would require reordering includes. */
#if defined(_MSC_VER)
#define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_ __pragma(warning(disable:4996))
#else
#define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_
#endif
/* Some compilers, such as clang, may use `long long` for 64-bit
* integers, but `long long` triggers a diagnostic with
* -Wc++98-compat-pedantic which says 'long long' is incompatible with
* C++98. */
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
#define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_
#endif
/* Same problem as above */
#if HEDLEY_HAS_WARNING("-Wc++11-long-long")
#define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_
#endif
/* emscripten emits this whenever stdin/stdout/stderr is used in a
* macro. */
#if HEDLEY_HAS_WARNING("-Wdisabled-macro-expansion")
#define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ _Pragma("clang diagnostic ignored \"-Wdisabled-macro-expansion\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_
#endif
/* Clang uses C11 generic selections to implement some AltiVec
* functions, which triggers this diagnostic when not compiling
* in C11 mode */
#if HEDLEY_HAS_WARNING("-Wc11-extensions")
#define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc11-extensions\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_
#endif
/* Clang sometimes triggers this warning in macros in the AltiVec and
* NEON headers, or due to missing functions. */
#if HEDLEY_HAS_WARNING("-Wvector-conversion")
#define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"")
/* For NEON, the situation with -Wvector-conversion in clang < 10 is
* bad enough that we just disable the warning altogether. */
#if defined(__arm__) && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
#else
#define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
#if !defined(SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_)
#define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_
#endif
/* SLEEF triggers this a *lot* in their headers */
#if HEDLEY_HAS_WARNING("-Wignored-qualifiers")
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("clang diagnostic ignored \"-Wignored-qualifiers\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("GCC diagnostic ignored \"-Wignored-qualifiers\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_
#endif
/* GCC emits this under some circumstances when using __int128 */
#if HEDLEY_GCC_VERSION_CHECK(4,8,0)
#define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ _Pragma("GCC diagnostic ignored \"-Wpedantic\"")
#else
#define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_
#endif
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \
SIMDE_DIAGNOSTIC_DISABLE_PSABI_ \
SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \
SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \
SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ \
SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ \
SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ \
SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ \
SIMDE_DIAGNOSTIC_DISABLE_VLA_ \
SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \
SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ \
SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ \
SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ \
SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ \
SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_
#endif /* !defined(SIMDE_DIAGNOSTIC_H) */
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelFxImage() applies a channel expression to the specified image. The
% expression consists of one or more channels, either mnemonic or numeric (e.g.
% red, 1), separated by actions as follows:
%
% <=> exchange two channels (e.g. red<=>blue)
% => copy one channel to another channel (e.g. red=>green)
% = assign a constant value to a channel (e.g. red=50%)
% , write new image channels in the specified order (e.g. red, green)
% | add a new output image for the next set of channel operations
% ; move to the next input image for the source of channel data
%
% For example, to create 3 grayscale images from the red, green, and blue
% channels of an image, use:
%
% -channel-fx "red; green; blue"
%
% A channel without an operation symbol implies separate (i.e., semicolon).
%
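% For example, to exchange the red and blue channels of an image, use:
%
% -channel-fx "red<=>blue"
%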
% The format of the ChannelFxImage method is:
%
% Image *ChannelFxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A channel expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef enum
{
ExtractChannelOp,
AssignChannelOp,
ExchangeChannelOp,
TransferChannelOp
} ChannelFx;
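/*
  ChannelImage() transfers a single channel between images: for each pixel it
  either assigns the constant `pixel` (AssignChannelOp) or copies
  source_channel from source_image into destination_channel of
  destination_image, processing one cache-view row at a time.
*/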
static MagickBooleanType ChannelImage(Image *destination_image,
const PixelChannel destination_channel,const ChannelFx channel_op,
const Image *source_image,const PixelChannel source_channel,
const Quantum pixel,ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
size_t
height,
width;
ssize_t
y;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
destination_view=AcquireAuthenticCacheView(destination_image,exception);
height=MagickMin(source_image->rows,destination_image->rows);
width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
PixelTrait
destination_traits,
source_traits;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(destination_view,0,y,
destination_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
destination_traits=GetPixelChannelTraits(destination_image,
destination_channel);
source_traits=GetPixelChannelTraits(source_image,source_channel);
if ((destination_traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
for (x=0; x < (ssize_t) width; x++)
{
if (channel_op == AssignChannelOp)
SetPixelChannel(destination_image,destination_channel,pixel,q);
else
SetPixelChannel(destination_image,destination_channel,
GetPixelChannel(source_image,source_channel,p),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(destination_image);
}
if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
#define ChannelFxImageTag "ChannelFx/Image"
ChannelFx
channel_op;
ChannelType
channel_mask;
char
token[MagickPathExtent];
const char
*p;
const Image
*source_image;
double
pixel;
Image
*destination_image;
MagickBooleanType
status;
PixelChannel
source_channel,
destination_channel;
ssize_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
source_image=image;
destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
if (destination_image == (Image *) NULL)
return((Image *) NULL);
if (expression == (const char *) NULL)
return(destination_image);
status=SetImageStorageClass(destination_image,DirectClass,exception);
if (status == MagickFalse)
{
destination_image=GetLastImageInList(destination_image);
return((Image *) NULL);
}
destination_channel=RedPixelChannel;
channel_mask=UndefinedChannel;
pixel=0.0;
p=(char *) expression;
(void) GetNextToken(p,&p,MagickPathExtent,token);
channel_op=ExtractChannelOp;
for (channels=0; *token != '\0'; )
{
ssize_t
i;
/*
Interpret channel expression.
*/
switch (*token)
{
case ',':
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
case '|':
{
if (GetNextImageInList(source_image) != (Image *) NULL)
source_image=GetNextImageInList(source_image);
else
source_image=GetFirstImageInList(source_image);
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
case ';':
{
Image
*canvas;
(void) SetPixelChannelMask(destination_image,channel_mask);
if ((channel_op == ExtractChannelOp) && (channels == 1))
{
(void) SetPixelMetaChannels(destination_image,0,exception);
(void) SetImageColorspace(destination_image,GRAYColorspace,
exception);
}
canvas=CloneImage(source_image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
{
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
AppendImageToList(&destination_image,canvas);
destination_image=GetLastImageInList(destination_image);
status=SetImageStorageClass(destination_image,DirectClass,exception);
if (status == MagickFalse)
{
destination_image=GetLastImageInList(destination_image);
return((Image *) NULL);
}
(void) GetNextToken(p,&p,MagickPathExtent,token);
channels=0;
destination_channel=RedPixelChannel;
channel_mask=UndefinedChannel;
break;
}
default:
break;
}
i=ParsePixelChannelOption(token);
if (i < 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnrecognizedChannelType","`%s'",token);
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
source_channel=(PixelChannel) i;
channel_op=ExtractChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == '<')
{
channel_op=ExchangeChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
if (*token == '=')
{
if (channel_op != ExchangeChannelOp)
channel_op=AssignChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
if (*token == '>')
{
if (channel_op != ExchangeChannelOp)
channel_op=TransferChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
switch (channel_op)
{
case AssignChannelOp:
case ExchangeChannelOp:
case TransferChannelOp:
{
if (channel_op == AssignChannelOp)
pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
else
{
i=ParsePixelChannelOption(token);
if (i < 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnrecognizedChannelType","`%s'",token);
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
}
destination_channel=(PixelChannel) i;
if (i >= (ssize_t) GetPixelChannels(destination_image))
(void) SetPixelMetaChannels(destination_image,(size_t) (
destination_channel-GetPixelChannels(destination_image)+1),
exception);
if (image->colorspace != UndefinedColorspace)
switch (destination_channel)
{
case RedPixelChannel:
case GreenPixelChannel:
case BluePixelChannel:
case BlackPixelChannel:
case IndexPixelChannel:
break;
case AlphaPixelChannel:
{
destination_image->alpha_trait=BlendPixelTrait;
break;
}
case CompositeMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | CompositeMaskChannel);
break;
}
case ReadMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | ReadMaskChannel);
break;
}
case WriteMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | WriteMaskChannel);
break;
}
case MetaPixelChannel:
default:
{
(void) SetPixelMetaChannels(destination_image,(size_t) (
destination_channel-GetPixelChannels(destination_image)+1),
exception);
break;
}
}
channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
if (((channels >= 1) || (destination_channel >= 1)) &&
(IsGrayColorspace(destination_image->colorspace) != MagickFalse))
(void) SetImageColorspace(destination_image,sRGBColorspace,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
default:
break;
}
status=ChannelImage(destination_image,destination_channel,channel_op,
source_image,source_channel,ClampToQuantum(pixel),exception);
if (status == MagickFalse)
{
destination_image=DestroyImageList(destination_image);
break;
}
channels++;
if (channel_op == ExchangeChannelOp)
{
status=ChannelImage(destination_image,source_channel,channel_op,
source_image,destination_channel,ClampToQuantum(pixel),exception);
if (status == MagickFalse)
{
destination_image=DestroyImageList(destination_image);
break;
}
channels++;
}
switch (channel_op)
{
case ExtractChannelOp:
{
channel_mask=(ChannelType) (channel_mask |
(1UL << destination_channel));
destination_channel=(PixelChannel) (destination_channel+1);
break;
}
default:
break;
}
status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
strlen(expression));
if (status == MagickFalse)
break;
}
(void) SetPixelChannelMask(destination_image,channel_mask);
if ((channel_op == ExtractChannelOp) && (channels == 1))
{
(void) SetPixelMetaChannels(destination_image,0,exception);
(void) SetImageColorspace(destination_image,GRAYColorspace,exception);
}
return(GetFirstImageInList(destination_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *image,const ColorspaceType colorspace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image sequence.
%
% o colorspace: the image colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"
CacheView
*combine_view;
Image
*combine_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Ensure the images are the same size.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
combine_image=CloneImage(image,0,0,MagickTrue,exception);
if (combine_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
{
combine_image=DestroyImage(combine_image);
return((Image *) NULL);
}
if (colorspace != UndefinedColorspace)
(void) SetImageColorspace(combine_image,colorspace,exception);
else
if (fabs(image->gamma-1.0) <= MagickEpsilon)
(void) SetImageColorspace(combine_image,RGBColorspace,exception);
else
(void) SetImageColorspace(combine_image,sRGBColorspace,exception);
switch (combine_image->colorspace)
{
case UndefinedColorspace:
case sRGBColorspace:
{
if (GetImageListLength(image) > 3)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
if (GetImageListLength(image) > 1)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
case CMYKColorspace:
{
if (GetImageListLength(image) > 4)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
default:
break;
}
/*
Combine images.
*/
status=MagickTrue;
progress=0;
combine_view=AcquireAuthenticCacheView(combine_image,exception);
for (y=0; y < (ssize_t) combine_image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
Quantum
*pixels;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
i;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
1,exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
next=image;
for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
{
ssize_t
x;
PixelChannel channel = GetPixelChannelChannel(combine_image,i);
PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if (next == (Image *) NULL)
continue;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
q=pixels;
for (x=0; x < (ssize_t) combine_image->columns; x++)
{
if (x < (ssize_t) next->columns)
{
q[i]=GetPixelIntensity(next,p);
p+=GetPixelChannels(next);
}
q+=GetPixelChannels(combine_image);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CombineImageTag,progress,
combine_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
combine_view=DestroyCacheView(combine_view);
if (status == MagickFalse)
combine_image=DestroyImage(combine_image);
return(combine_image);
}
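/*
  Usage sketch (illustrative, not part of MagickCore): combine a list of
  grayscale images into one sRGB image; `gray_list' and `exception' are
  assumed to be set up by the caller.

    Image *rgb = CombineImages(gray_list, sRGBColorspace, exception);
    if (rgb == (Image *) NULL)
      { ... the failure is recorded in exception ... }
*/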
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
assert(image != (const Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse);
}
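/*
  Usage sketch (illustrative): ensure an alpha channel exists before
  alpha-dependent processing; `image' and `exception' are assumed valid.

    if (GetImageAlphaChannel(image) == MagickFalse)
      (void) SetImageAlphaChannel(image, OpaqueAlphaChannel, exception);
*/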
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImage() separates a channel from the image and returns it as a
% grayscale image.
%
% The format of the SeparateImage method is:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag "Separate/Image"
CacheView
*image_view,
*separate_view;
Image
*separate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize separate image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
separate_image=CloneImage(image,0,0,MagickTrue,exception);
if (separate_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
{
separate_image=DestroyImage(separate_image);
return((Image *) NULL);
}
separate_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(separate_image,GRAYColorspace,exception);
separate_image->gamma=image->gamma;
/*
Separate image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(GetChannelBit(channel_type,channel) == 0))
continue;
SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(separate_image);
}
if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
separate_view=DestroyCacheView(separate_view);
image_view=DestroyCacheView(image_view);
(void) SetImageChannelMask(separate_image,DefaultChannels);
if (status == MagickFalse)
separate_image=DestroyImage(separate_image);
return(separate_image);
}
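/*
  Usage sketch (illustrative): extract the green channel as a grayscale
  image; `image' and `exception' are assumed valid.

    Image *green = SeparateImage(image, GreenChannel, exception);
    if (green == (Image *) NULL)
      { ... the failure is recorded in exception ... }
*/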
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
Image
*images,
*separate_image;
ssize_t
i;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
images=NewImageList();
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
continue;
separate_image=SeparateImage(image,(ChannelType) (1UL << channel),
exception);
if (separate_image != (Image *) NULL)
AppendImageToList(&images,separate_image);
}
if (images == (Image *) NULL)
images=SeparateImage(image,UndefinedChannel,exception);
return(images);
}
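/*
  Usage sketch (illustrative): iterate over the grayscale image returned
  for each updatable channel.

    Image *channels = SeparateImages(image, exception);
    for (Image *p = channels; p != (Image *) NULL; p = GetNextImageInList(p))
      { ... process one channel image ... }
*/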
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
% DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
% OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
% and TransparentAlphaChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
const double alpha,const Quantum *q,const double beta,
Quantum *composite)
{
double
Da,
gamma,
Sa;
ssize_t
i;
/*
Compose pixel p over pixel q with the given alpha.
*/
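/*
  The math here is the Porter-Duff "over" operator: with source alpha Sa
  and destination alpha Da (both scaled to [0,1]), the composite alpha is
  Sa+Da-Sa*Da, which is exactly the gamma computed below; each color
  channel is blended by MagickOver_ and then normalized by 1/gamma via
  PerceptibleReciprocal.
*/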
Sa=QuantumScale*alpha;
Da=QuantumScale*beta;
gamma=Sa*(-Da)+Sa+Da;
gamma=PerceptibleReciprocal(gamma);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
switch (channel)
{
case RedPixelChannel:
{
composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
(double) p->red,alpha));
break;
}
case GreenPixelChannel:
{
composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
(double) p->green,alpha));
break;
}
case BluePixelChannel:
{
composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
(double) p->blue,alpha));
break;
}
case BlackPixelChannel:
{
composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
(double) p->black,alpha));
break;
}
case AlphaPixelChannel:
{
composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
break;
}
default:
break;
}
}
}
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
switch (alpha_type)
{
case ActivateAlphaChannel:
{
if (image->alpha_trait == BlendPixelTrait)
return(status);
image->alpha_trait=BlendPixelTrait;
break;
}
case AssociateAlphaChannel:
{
/*
Associate alpha.
*/
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (channel == AlphaPixelChannel)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(gamma*q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=CopyPixelTrait;
return(status);
}
case BackgroundAlphaChannel:
{
/*
Set transparent pixels to background color.
*/
if (image->alpha_trait == UndefinedPixelTrait)
break;
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelAlpha(image,q) == TransparentAlpha)
{
SetPixelViaPixelInfo(image,&image->background_color,q);
SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case CopyAlphaChannel:
{
image->alpha_trait=UpdatePixelTrait;
status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
exception);
break;
}
case DeactivateAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=CopyPixelTrait;
break;
}
case DisassociateAlphaChannel:
{
/*
Disassociate alpha.
*/
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image->alpha_trait=BlendPixelTrait;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma,
Sa;
ssize_t
i;
Sa=QuantumScale*GetPixelAlpha(image,q);
gamma=PerceptibleReciprocal(Sa);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (channel == AlphaPixelChannel)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(gamma*q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=UndefinedPixelTrait;
return(status);
}
case DiscreteAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=UpdatePixelTrait;
break;
}
case ExtractAlphaChannel:
{
status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case OffAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
return(status);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case OnAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
image->alpha_trait=BlendPixelTrait;
break;
}
case OpaqueAlphaChannel:
{
status=SetImageAlpha(image,OpaqueAlpha,exception);
break;
}
case RemoveAlphaChannel:
{
/*
Remove transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
break;
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
FlattenPixelInfo(image,&image->background_color,
image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->alpha_trait=image->background_color.alpha_trait;
break;
}
case SetAlphaChannel:
{
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlpha(image,OpaqueAlpha,exception);
break;
}
case ShapeAlphaChannel:
{
PixelInfo
background;
/*
Remove transparency.
*/
ConformPixelInfo(image,&image->background_color,&background,exception);
background.alpha_trait=BlendPixelTrait;
image->alpha_trait=BlendPixelTrait;
status=SetImageStorageClass(image,DirectClass,exception);
if (status == MagickFalse)
break;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=background;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel.alpha=GetPixelIntensity(image,q);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
break;
}
case TransparentAlphaChannel:
{
status=SetImageAlpha(image,TransparentAlpha,exception);
break;
}
case UndefinedAlphaChannel:
break;
}
if (status == MagickFalse)
return(status);
(void) SetPixelChannelMask(image,image->channel_mask);
return(SyncImagePixelCache(image,exception));
}
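/*
  Usage sketch (illustrative): flatten transparency against the background
  color, then drop the alpha channel entirely.

    (void) SetImageAlphaChannel(image, RemoveAlphaChannel, exception);
    (void) SetImageAlphaChannel(image, OffAlphaChannel, exception);
*/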
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
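/* Worked example: x = {5 s, 100 us}, y = {3 s, 900000 us}. The borrow step
 * rewrites y as {4 s, -100000 us}, giving result = {1 s, 100100 us}, i.e.
 * 5.000100 - 3.900000 = 1.100100 seconds, and the function returns 0. */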
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx = 34, Ny = 34, Nz = 34, Nt = 10; /* defaults (arbitrary: 32^3 interior, 10 steps) so the run is well-defined without CLI arguments */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 24;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
// start the loops at 0 so the boundary layers read by the stencil are initialized
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - 6 additions and 2 multiplications per grid point
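// Update rule applied at each interior point (matches the loop nest below):
//   A[t+1][i][j][k] = alpha*A[t][i][j][k]
//                   + beta*(A[t][i-1][j][k] + A[t][i+1][j][k]
//                           + A[t][i][j-1][k] + A[t][i][j+1][k]
//                           + A[t][i][j][k-1] + A[t][i][j][k+1])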
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (left commented out: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
wyhash.h | /* Author: Wang Yi <godspeed_china@yeah.net> */
#ifndef wyhash_version_1
#define wyhash_version_1
#include <stdint.h>
#include <string.h>
#include <math.h>
#if defined(_MSC_VER) && defined(_M_X64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif
const uint64_t _wyp0=0xa0761d6478bd642full, _wyp1=0xe7037ed1a0b428dbull, _wyp2=0x8ebc6af09c88c6e3ull;
const uint64_t _wyp3=0x589965cc75374cc3ull, _wyp4=0x1d8e4e27c47d124full, _wyp5=0xeb44accab455d165ull;
static inline uint64_t _wymum(uint64_t A, uint64_t B){
#ifdef __SIZEOF_INT128__
__uint128_t r=A; r*=B; return (r>>64)^r;
#elif defined(_MSC_VER) && defined(_M_X64)
A=_umul128(A, B, &B); return A^B;
#else
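// Portable fallback: schoolbook 64x64 -> 128-bit multiply built from 32-bit
// halves with carry propagation, then xor of the high and low 64-bit halves.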
uint64_t ha=A>>32, hb=B>>32, la=(uint32_t)A, lb=(uint32_t)B, hi, lo;
uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
return hi^lo;
#endif
}
static inline uint64_t _wyr08(const uint8_t *p){ uint8_t v; memcpy(&v, p, 1); return v; }
static inline uint64_t _wyr16(const uint8_t *p){ uint16_t v; memcpy(&v, p, 2); return v; }
static inline uint64_t _wyr32(const uint8_t *p){ uint32_t v; memcpy(&v, p, 4); return v; }
static inline uint64_t _wyr64(const uint8_t *p){ uint64_t v; memcpy(&v, p, 8); return v; }
static inline uint64_t __wyr64(const uint8_t *p){ return (_wyr32(p)<<32)|_wyr32(p+4); }
static inline uint64_t wyhash(const void* key, uint64_t len, uint64_t seed){
const uint8_t *p=(const uint8_t*)key; uint64_t i;
for(i=0; i+32<=len; i+=32, p+=32) seed=_wymum(seed^_wyp0, _wymum(_wyr64(p)^_wyp1,_wyr64(p+8)^_wyp2)^_wymum(_wyr64(p+16)^_wyp3,_wyr64(p+24)^_wyp4));
seed^=_wyp0;
switch(len&31){
case 1: seed=_wymum(seed,_wyr08(p)^_wyp1); break;
case 2: seed=_wymum(seed,_wyr16(p)^_wyp1); break;
case 3: seed=_wymum(seed,((_wyr16(p)<<8)|_wyr08(p+2))^_wyp1); break;
case 4: seed=_wymum(seed,_wyr32(p)^_wyp1); break;
case 5: seed=_wymum(seed,((_wyr32(p)<<8)|_wyr08(p+4))^_wyp1); break;
case 6: seed=_wymum(seed,((_wyr32(p)<<16)|_wyr16(p+4))^_wyp1); break;
case 7: seed=_wymum(seed,((_wyr32(p)<<24)|(_wyr16(p+4)<<8)|_wyr08(p+6))^_wyp1); break;
case 8: seed=_wymum(seed,__wyr64(p)^_wyp1); break;
case 9: seed=_wymum(__wyr64(p)^seed,_wyr08(p+8)^_wyp2); break;
case 10: seed=_wymum(__wyr64(p)^seed,_wyr16(p+8)^_wyp2); break;
case 11: seed=_wymum(__wyr64(p)^seed,((_wyr16(p+8)<<8)|_wyr08(p+8+2))^_wyp2); break;
case 12: seed=_wymum(__wyr64(p)^seed,_wyr32(p+8)^_wyp2); break;
case 13: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<8)|_wyr08(p+8+4))^_wyp2); break;
case 14: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<16)|_wyr16(p+8+4))^_wyp2); break;
case 15: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<24)|(_wyr16(p+8+4)<<8)|_wyr08(p+8+6))^_wyp2); break;
case 16: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2); break;
case 17: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr08(p+16)^_wyp3); break;
case 18: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr16(p+16)^_wyp3); break;
case 19: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr16(p+16)<<8)|_wyr08(p+16+2))^_wyp3); break;
case 20: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr32(p+16)^_wyp3); break;
case 21: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<8)|_wyr08(p+16+4))^_wyp3); break;
case 22: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<16)|_wyr16(p+16+4))^_wyp3); break;
case 23: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<24)|(_wyr16(p+16+4)<<8)|_wyr08(p+16+6))^_wyp3); break;
case 24: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,__wyr64(p+16)^_wyp3); break;
case 25: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr08(p+24)^_wyp4); break;
case 26: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr16(p+24)^_wyp4); break;
case 27: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr16(p+24)<<8)|_wyr08(p+24+2))^_wyp4); break;
case 28: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr32(p+24)^_wyp4); break;
case 29: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<8)|_wyr08(p+24+4))^_wyp4); break;
case 30: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<16)|_wyr16(p+24+4))^_wyp4); break;
case 31: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<24)|(_wyr16(p+24+4)<<8)|_wyr08(p+24+6))^_wyp4); break;
}
return _wymum(seed, len^_wyp5);
}
static inline uint64_t wyhash256(const void* key){
const uint8_t *p=(const uint8_t*)key;
return _wymum(_wymum(_wyr64(p)^_wyp1,_wyr64(p+8)^_wyp2),_wymum(_wyr64(p+16)^_wyp3,_wyr64(p+24)^_wyp4));
}
static inline uint64_t wyhash64(uint64_t A, uint64_t B){ return _wymum(_wymum(A^_wyp0, B^_wyp1), _wyp2); }
static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r&0x000fffffffffffffull)*_wynorm; }
static inline float wy2gau(uint64_t r){ const float _wynorm1=1.0f/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+(r>>43))*_wynorm1-3.0f; }
static inline uint64_t wyrand(uint64_t *seed){ *seed+=_wyp0; return _wymum(*seed^_wyp1,*seed); }
static uint64_t _wyrand_seed=0;
#define WYRAND_MAX 0xffffffffffffffffull
static inline void wysrand(uint64_t seed){ _wyrand_seed=seed; }
/* Global-state variant; renamed here (name is our choice) from wyrand to
   avoid redefining the seeded wyrand(uint64_t *) above. */
static inline uint64_t wyrand_global(void){
uint64_t s;
#if defined(_OPENMP)
#pragma omp atomic capture
#endif
{
_wyrand_seed += _wyp0;
s = _wyrand_seed;
}
return _wymum(s^_wyp1,s);
}
#endif
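/*
  Minimal usage sketch (illustrative, not part of the original header):
  hash a buffer and draw one value from the seeded generator.

    #include <stdio.h>
    #include "wyhash.h"
    int main(void){
      const char msg[] = "hello wyhash";
      uint64_t s = 42;
      printf("%016llx %016llx\n",
             (unsigned long long) wyhash(msg, sizeof(msg)-1, 0),
             (unsigned long long) wyrand(&s));
      return 0;
    }
*/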
|
for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
void xxx(int argc) {
int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp for simd
for (int i = 0; i < 10; ++i)
argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp for simd'}}
#pragma omp for simd foo
void test_no_clause() {
int i;
#pragma omp for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp for simd' must be a for loop}}
#pragma omp for simd
++i;
}
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd;
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
#pragma omp for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_simdlen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp for simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp for simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_safelen_simdlen() {
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp for simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
void test_collapse() {
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp for simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
#pragma omp for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd collapse(2)
for (i = 0; i < 16; ++i) // expected-note {{defined as lastprivate}}
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for simd' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_linear() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd linear(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd linear(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd linear(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp for simd linear(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp for simd linear(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd linear(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd linear(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp for simd linear(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp for simd private(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp for simd linear(x) private(x)
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp for simd linear(x, y : 0)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp for simd linear(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp for simd lastprivate(x) linear(x)
for (i = 0; i < 16; ++i)
;
}
void test_aligned() {
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp for simd aligned(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp for simd aligned(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd aligned(z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp for simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
GB_binop__land_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_uint16
// A.*B function (eWiseMult): GB_AemultB__land_uint16
// A*D function (colscale): GB_AxD__land_uint16
// D*A function (rowscale): GB_DxB__land_uint16
// C+=B function (dense accum): GB_Cdense_accumB__land_uint16
// C+=b function (dense accum): GB_Cdense_accumb__land_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_uint16
// C=scalar+B GB_bind1st__land_uint16
// C=scalar+B' GB_bind1st_tran__land_uint16
// C=A+scalar GB_bind2nd__land_uint16
// C=A'+scalar GB_bind2nd_tran__land_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
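// e.g. with uint16 inputs: aij=5, bij=7 gives cij=1; aij=5, bij=0 gives cij=0.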
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_UINT16 || GxB_NO_LAND_UINT16)
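// To show how these macros compose (an illustrative sketch only; the real
// loops live in the GB_*_template.c files #include'd below), a dense
// eWise kernel would expand roughly as:
//
// for (int64_t p = 0 ; p < cnz ; p++)
// {
// GB_GETA (aij, Ax, p) ; // uint16_t aij = Ax [p]
// GB_GETB (bij, Bx, p) ; // uint16_t bij = Bx [p]
// GB_BINOP (GB_CX (p), aij, bij) ; // Cx [p] = ((aij != 0) && (bij != 0))
// }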
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__land_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__land_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__land_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__land_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__land_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__land_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__land_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__land_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t bij = Bx [p] ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__land_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__land_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__land_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
logger.h | /*
* logger.h
*
* Created on: 2011/04/11
* Author: shu
*/
#ifndef LOGGER_H_
#define LOGGER_H_
#include <iostream>
#include <ostream>
#include <string>
class Logger {
public:
static Logger* GetInstance() {
static Logger instance;
return &instance;
}
void ErrorLog(std::string message) {
//#pragma omp critical(lock_)
std::cout << "error : " << message << std::endl;
}
void WarningLog(std::string message) {
//#pragma omp critical(lock_)
std::cout << "warning : " << message << std::endl;
}
void Log(std::string message) {
//#pragma omp critical(lock_)
std::cout << message << std::endl;
}
private:
Logger()
{
}
~Logger() {
}
Logger(const Logger& rhs);
Logger& operator=(const Logger& rhs);
};
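// Example usage (the Meyers singleton above is created on first use; the
// commented-out critical sections would be needed to serialize output
// from concurrent OpenMP threads):
//
// Logger::GetInstance()->Log("starting alignment");
// Logger::GetInstance()->WarningLog("query is empty");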
#endif /* LOGGER_H_ */
|
CutPursuit_Linear.h | #pragma once
#include "Common.h"
#include "CutPursuit.h"
namespace CP {
template <typename T>
class CutPursuit_Linear : public CutPursuit<T>
{
public:
~CutPursuit_Linear(){
};
// virtual ~CutPursuit_Linear();
std::vector<std::vector<T>> componentVector;
// only used with the backward step - the sum of all observations in the component
CutPursuit_Linear(uint32_t nbVertex = 1) : CutPursuit<T>(nbVertex)
{
this->componentVector = std::vector<std::vector<T>>(1);
}
virtual std::pair<T,T> compute_energy() override
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
std::pair<T,T> pair_energy;
T energy = 0;
VertexIterator<T> i_ver;
//#pragma omp parallel for private(i_ver) if (this->parameter.parallel)
for (i_ver = boost::vertices(this->main_graph).first;
i_ver != this->lastIterator; ++i_ver)
{
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
energy -= vertex_attribute_map(*i_ver).weight
* vertex_attribute_map(*i_ver).observation[i_dim]
* vertex_attribute_map(*i_ver).value[i_dim];
}
}
pair_energy.first = energy;
energy = 0;
EdgeIterator<T> i_edg, i_edg_end = boost::edges(this->main_graph).second;
for (i_edg = boost::edges(this->main_graph).first;
i_edg != i_edg_end; ++i_edg)
{
if (!edge_attribute_map(*i_edg).realEdge)
{
continue;
}
energy += .5 * edge_attribute_map(*i_edg).isActive * this->parameter.reg_strenth
* edge_attribute_map(*i_edg).weight;
}
pair_energy.second = energy;
return pair_energy;
}
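// Stated as a formula, compute_energy above returns the two terms of
// E = -sum_v w_v * <obs_v, value_v> + (reg_strenth / 2) * sum_{(u,v) active} w_uv,
// i.e. the (negated) linear fidelity term and the weighted boundary penalty.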
//=============================================================================================
//============================= SPLIT ===========================================
//=============================================================================================
virtual uint32_t split()
{ // split the graph by trying to find the best binary partition
// each component is split into B and notB
uint32_t saturation;
//initialize h_1 and h_2 with kmeans
//--------initializing labels------------------------------------------------------------
//corners contains the two most likely classes for each component
std::vector< std::vector< uint32_t > > corners =
std::vector< std::vector< uint32_t > >(this->components.size(),
std::vector< uint32_t >(2,0));
this->compute_corners(corners);
this->set_capacities(corners);
//compute flow
boost::boykov_kolmogorov_max_flow(
this->main_graph,
get(&EdgeAttribute<T>::capacity , this->main_graph),
get(&EdgeAttribute<T>::residualCapacity, this->main_graph),
get(&EdgeAttribute<T>::edge_reverse , this->main_graph),
get(&VertexAttribute<T>::color , this->main_graph),
get(boost::vertex_index , this->main_graph),
this->source,
this->sink);
saturation = this->activate_edges();
return saturation;
}
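// In other words, the split is cast as a min-cut: set_capacities (below)
// links each vertex to the source with capacity max(cost_B - cost_notB, 0)
// and to the sink with the opposite, gives inactive inter-vertex edges
// capacity weight * reg_strenth (active ones get 0), and the
// boykov_kolmogorov min-cut then yields the B / notB labeling.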
//=============================================================================================
//============================= COMPUTE CORNERS ===================================
//=============================================================================================
inline void compute_corners(std::vector< std::vector< uint32_t > > & corners)
{ //-----compute the 2 most populous labels------------------------------
//#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
for (uint32_t i_com =0;i_com < this->components.size(); i_com++)
{
if (this->saturated_components[i_com])
{
continue;
}
std::pair<uint32_t, uint32_t> corners_pair = find_corner(i_com);
corners[i_com][0] = corners_pair.first;
corners[i_com][1] = corners_pair.second;
}
return;
}
//=============================================================================================
//============================= find_corner =======================================
//=============================================================================================
std::pair<uint32_t, uint32_t> find_corner(const uint32_t & i_com)
{
// given a component, outputs the pair of its two most likely labels
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
std::vector<T> average_vector(this->dim,0);
for (uint32_t i_ver = 0; i_ver < this->components[i_com].size(); i_ver++)
{
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
average_vector.at(i_dim) += vertex_attribute_map[this->components[i_com][i_ver]].observation[i_dim]
* vertex_attribute_map[this->components[i_com][i_ver]].weight;
}
}
uint32_t indexOfMax = 0;
for(uint32_t i_dim=1; i_dim < this->dim; i_dim++)
{
if(average_vector.at(indexOfMax) < average_vector.at(i_dim))
{
indexOfMax = i_dim;
}
}
average_vector[indexOfMax] = -1;
uint32_t indexOfSndMax = 0;
for(uint32_t i_dim=1; i_dim < this->dim; i_dim++)
{
if(average_vector[indexOfSndMax] < average_vector[i_dim])
{
indexOfSndMax = i_dim;
}
}
return std::pair<uint32_t, uint32_t>(indexOfMax, indexOfSndMax);
}
//=============================================================================================
//============================= SET_CAPACITIES =======================================
//=============================================================================================
inline void set_capacities(const std::vector< std::vector< uint32_t > > & corners)
{
VertexDescriptor<T> desc_v;
EdgeDescriptor desc_source2v, desc_v2sink, desc_v2source;
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
T cost_B, cost_notB; //the cost of being in B or not B, local for each component
//----first compute the capacity in sink/node edges------------------------------------
//#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
for (uint32_t i_com = 0; i_com < this->components.size(); i_com++)
{
if (this->saturated_components[i_com])
{
continue;
}
for (uint32_t i_ver = 0; i_ver < this->components[i_com].size(); i_ver++)
{
desc_v = this->components[i_com][i_ver];
// because of the adjacency structure NEVER access edge (source,v) directly!
desc_v2source = boost::edge(desc_v, this->source,this->main_graph).first;
desc_source2v = edge_attribute_map(desc_v2source).edge_reverse; //use edge_reverse instead
desc_v2sink = boost::edge(desc_v, this->sink,this->main_graph).first;
cost_B = 0;
cost_notB = 0;
if (vertex_attribute_map(desc_v).weight==0)
{
edge_attribute_map(desc_source2v).capacity = 0;
edge_attribute_map(desc_v2sink).capacity = 0;
continue;
}
cost_B += vertex_attribute_map(desc_v).observation[corners[i_com][0]];
cost_notB += vertex_attribute_map(desc_v).observation[corners[i_com][1]];
if (cost_B>cost_notB)
{
edge_attribute_map(desc_source2v).capacity = cost_B - cost_notB;
edge_attribute_map(desc_v2sink).capacity = 0.;
}
else
{
edge_attribute_map(desc_source2v).capacity = 0.;
edge_attribute_map(desc_v2sink).capacity = cost_notB - cost_B;
}
}
}
//----then set the vertex to vertex edges ---------------------------------------------
EdgeIterator<T> i_edg, i_edg_end;
for (boost::tie(i_edg, i_edg_end) = boost::edges(this->main_graph);
i_edg != i_edg_end; ++i_edg)
{
if (!edge_attribute_map(*i_edg).realEdge)
{
continue;
}
if (!edge_attribute_map(*i_edg).isActive)
{
edge_attribute_map(*i_edg).capacity
= edge_attribute_map(*i_edg).weight * this->parameter.reg_strenth;
}
else
{
edge_attribute_map(*i_edg).capacity = 0;
}
}
}
//=============================================================================================
//================================= COMPUTE_VALUE =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & i_com) override
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
if (i_com == 0)
{ // we allocate the space necessary for the component vector at the first read of the component
this->componentVector = std::vector<std::vector<T>>(this->components.size());
}
std::vector<T> average_vector(this->dim), component_value(this->dim);
T total_weight = 0;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
average_vector[i_dim] = 0;
}
for (uint32_t ind_ver = 0; ind_ver < this->components[i_com].size(); ++ind_ver)
{
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
average_vector[i_dim] += vertex_attribute_map[this->components[i_com][ind_ver]].observation[i_dim]
* vertex_attribute_map[this->components[i_com][ind_ver]].weight;
}
total_weight += vertex_attribute_map[this->components[i_com][ind_ver]].weight;
vertex_attribute_map(this->components[i_com][ind_ver]).in_component = i_com;
}
this->componentVector[i_com] = average_vector;
uint32_t indexOfMax = 0;
for(uint32_t i_dim=1; i_dim < this->dim; i_dim++)
{
if(average_vector[indexOfMax] < average_vector[i_dim])
{
indexOfMax = i_dim;
}
}
for (uint32_t ind_ver = 0; ind_ver < this->components[i_com].size(); ++ind_ver)
{
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
if (i_dim == indexOfMax)
{
component_value[i_dim] = 1;
vertex_attribute_map(this->components[i_com][ind_ver]).value[i_dim] = 1;
}
else
{
component_value[i_dim] = 0;
vertex_attribute_map(this->components[i_com][ind_ver]).value[i_dim] = 0;
}
}
}
return std::pair<std::vector<T>, T>(component_value, total_weight);
}
//=============================================================================================
//================================= COMPUTE_MERGE_GAIN =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1
, const VertexDescriptor<T> & comp2) override
{
VertexAttributeMap<T> reduced_vertex_attribute_map
= boost::get(boost::vertex_bundle, this->reduced_graph);
VertexIndexMap<T> reduced_vertex_vertex_index_map = get(boost::vertex_index, this->reduced_graph);
std::vector<T> merge_value(this->dim), mergedVector(this->dim);
T gain = 0;
// compute the value obtained by merging the two connected components
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
mergedVector[i_dim] = this->componentVector[reduced_vertex_vertex_index_map(comp1)][i_dim]
+ this->componentVector[reduced_vertex_vertex_index_map(comp2)][i_dim];
}
uint32_t indexOfMax = 0;
for(uint32_t i_dim=1; i_dim < this->dim; i_dim++)
{
if(mergedVector[indexOfMax] < mergedVector[i_dim])
{
indexOfMax = i_dim;
}
}
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
if (i_dim == indexOfMax)
{
merge_value[i_dim] = 1;
}
else
{
merge_value[i_dim] = 0;
}
gain += mergedVector[i_dim] * merge_value[i_dim]
- this->componentVector[reduced_vertex_vertex_index_map(comp1)][i_dim]
* reduced_vertex_attribute_map(comp1).value[i_dim]
- this->componentVector[reduced_vertex_vertex_index_map(comp2)][i_dim]
* reduced_vertex_attribute_map(comp2).value[i_dim];
}
return std::pair<std::vector<T>, T>(merge_value, gain);
}
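// Because each component's value is 1 on exactly one dimension, the gain
// above reduces to max_d(mergedVector_d) minus the two components' current
// fidelity terms; the merge is beneficial when both components already
// share the same dominant label.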
};
}
|
LAGraph_bfs_pushpull.c | //------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: push-pull breadth-first search
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search,
// contributed by Tim Davis, Texas A&M.
// LAGraph_bfs_pushpull computes the BFS of a graph from a single given
// source node. The result is a vector v where v(i)=k if node i was placed
// at level k in the BFS.
// Usage:
// info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ;
// GrB_Vector *v: a vector containing the result, created on output.
// v(i) = k is the BFS level of node i in the graph, where a source
// node has v(source)=1. v(i) is implicitly zero if it is unreachable
// from the source node. That is, GrB_Vector_nvals (&nreach,v) is the
// size of the reachable set of the source node, for a single-source
// BFS. v may be returned as sparse, or full. If full, v(i)=0
// indicates that node i was not reached. If sparse, the pattern of v
// indicates the set of nodes reached.
// GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing.
// pi(source) = source+1 for source node. pi(i) = p+1 if p is the
// parent of i. If pi is sparse, and pi(i) is not present, then node
// i has not been reached. Otherwise, if pi is full, then pi(i)=0
// indicates that node i was not reached.
// GrB_Matrix A: a square matrix of any type. The values of A are not
// accessed. The presence of the entry A(i,j) indicates the edge
// (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge.
// GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm
// is a conventional push-only BFS. If not NULL, AT must be the
// transpose of A, and a push-pull algorithm is used (NOTE: this
// assumes GraphBLAS stores its matrix in CSR form; see discussion
// below). Results are undefined if AT is not NULL but not identical
// to the transpose of A.
// int64_t source: the source node for the BFS.
// int64_t max_level: An optional limit on the levels searched for the
// single-source BFS. If zero, then no limit is enforced. If > 0,
// then only nodes with v(i) <= max_level will be visited. That is:
// 1: just the source node, 2: the source and its neighbors, 3: the
// source node, its neighbors, and their neighbors, etc.
// bool vsparse: if the result v may remain very sparse, then set this
// parameter to true. If v might have many entries, set it false. If
// you are unsure, then set it to true. This parameter speeds up
// the handling of v. If you guess wrong, there is a slight
// performance penalty. The results are not affected by this
// parameter, just the performance. This parameter is used only for
// the single-source BFS.
// single-source BFS:
// Given a graph A, a source node, find all nodes reachable from the
// source node. v(source)=1, v(i)=2 if edge (source,i) appears in the
// graph, and so on. If node i is not reachable from source, then
// implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not
// an entry in this vector.
// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in. If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments. Results are undefined if AT
// is not the transpose of A.
// If only A or AT is passed in, then only a single strategy will be used: push
// or pull, but not both. In general, push-only performs well. A pull-only
// strategy is possible but it is exceedingly slow. Assuming A and AT are both
// in CSR format, then (let s = source node):
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)
// If A and AT are both in CSC format, then:
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!)
// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.
// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing,
// just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.
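// For example, the two equivalent formulations look like this (a sketch;
// the actual calls, with their masks and descriptors, appear in the BFS
// loop below):
/*
GrB_vxm (q, v, NULL, semiring, q, A, desc) ; // q'<!v> = q'*A
GrB_mxv (q, v, NULL, semiring, AT, q, desc) ; // q<!v> = AT*q
*/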
// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure). The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).
// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible. In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).
// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.
// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.
// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access. Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjacency.
// A push step requires the out-adjacencies of each node, whereas
// a pull step requires the in-adjacencies of each node.
// vxm(q,A) = A'*q, with A stored by row: a push step
// mxv(AT,q) = A'*q, with AT stored by row: a pull step
// vxm(q,A) = A'*q, with A stored by col: a pull step
// mxv(AT,q) = A'*q, with AT stored by col: a push step
// The GraphBLAS data structure is opaque. An implementation may decide to
// store the matrix A in both formats, internally, so that it can easily traverse
// both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i)
// can both be easily traversed). This would make a push-pull BFS easy to
// implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.
// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)), where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library
// could be augmented in the future with f = GxB_BY_BOTH. It currently does
// not select the format automatically. As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).
// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column. The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.
// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.
// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:
/*
function x = vxm (q,A)
% a push step: compute x = q'*A where q is a column vector
x = sparse (1,n)
for i = 1:n
% a saxpy operation, using the ith row of A and the scalar q(i)
x = x + q (i) * A (i,:)
end
*/
// If AT is stored by row, then x = mxv(AT,q) = AT*q = A'*q becomes
// a dot product:
/*
function x = mxv (AT,q)
% a pull step: compute x = AT*q where q is a column vector
for i = 1:n
% a dot-product of the ith row of AT and the column vector q
x (i) = AT (i,:) * q
end
*/
// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q) by default, where A and AT are stored by row by default. However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format. In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead. These next two snippets assume the matrices are both in CSC format, and
// thus make more efficient use of MATLAB:
/*
function x = vxm (q,A)
% a pull step: compute x = q'*A where q is a column vector
for j = 1:n
% a dot product of the row vector q' and the jth column of A
x (j) = q' * A (:,j)
end
*/
// If AT is stored by column, then x = mxv(AT,q) is
/*
function x = mxv (AT,q)
% a push step: compute x = AT*q where q is a column vector
for j = 1:n
% a saxpy operation, using the jth column of AT and the scalar q(j)
x = x + AT (:,j) * q (j)
end
*/
// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product. You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.
// This raises an interesting puzzle for LAGraph, which is intended to be a
// graph library that can be run on any implementation of GraphBLAS. There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS. Likewise, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraph asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer
// from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it
// does not answer this query.
// There are two solutions to this puzzle. The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed. It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS. This is not a simple task since the API is
// complex. Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).
// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.
// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.
// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format. Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically. By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically). So if no GxB_* extensions are used, all matrices
// are in CSR format.
// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed. With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.
// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value. This cuts the time for the
// dot-product. Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later). Early termination cannot be
// done for the saxpy (push step) method.
// The work done by the push method (saxpy) is very predictable. BFS uses a
// complemented mask. There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule. If the set of nodes in the current
// level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):
// pushwork = d*nq
// The work done by the pull (dot product) method is less predictable. It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally. That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q. To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination,
// d is too high. If the nodes are randomly marked, the probability of each
// node being marked is nvisited/n. The expected number of trials until
// success, for a sequence of events with probability p, is 1/p. Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = n/(nvisited+1), where +1 is added to avoid a divide by zero.
// However, it cannot exceed d. Thus, the total work for the dot product
// (pull) method can be estimated as:
// per_dot = min (d, n / (nvisited+1))
// pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))
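// As a worked example (illustrative numbers only): with n = 1e6 nodes,
// average degree d = 16, nq = 1000 nodes in the current level, and
// nvisited = 10000, pushwork = d*nq = 16,000, while per_dot =
// min (16, 1e6/10001) = 16 and pullwork = 990,000 * 16 * (3*(1+log2(1000))),
// which is about 5.2e8, so the push step is chosen. Late in the BFS,
// as nvisited approaches n, the (n-nvisited) factor shrinks and the
// pull step wins instead.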
// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations. Push or pull
// is selected as the one with the least work.
// TODO: change the formula for v3.2.0
// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other. If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC). If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).
// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance. In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This is why this function does not compute AT=A'. To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.
// A pull-only strategy will be *exceedingly* slow.
// The input matrix A must be square. It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL. It can have explicit
// entries equal to zero. These are safely ignored, and are treated as
// non-edges.
// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only A or AT are provided, and the result is a pull-only algorithm,
// an error is returned.
// References:
// Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull
// Efficiently in GraphBLAS. In Proceedings of the 47th International
// Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA,
// Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122
// Scott Beamer, Krste Asanovic and David A. Patterson,
// The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/
#include "LAGraph_internal.h"
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&v) ; \
GrB_free (&t) ; \
GrB_free (&q) ; \
GrB_free (&pi) ; \
}
GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL
(
GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph
GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i.
// if NULL, the parent is not computed.
GrB_Matrix A, // input graph, treated as if boolean in semiring
GrB_Matrix AT, // transpose of A (optional; push-only if NULL)
int64_t source, // starting node of the BFS
int64_t max_level, // optional limit of # levels to search
bool vsparse // if true, v is expected to be very sparse
)
{
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) && ( GxB_IMPLEMENTATION >= GxB_VERSION (5,0,0) )
printf ("v5.0.0 not supported\n") ;
return (GrB_PANIC) ;
#else
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Vector q = NULL ; // nodes visited at each level
GrB_Vector v = NULL ; // result vector
GrB_Vector t = NULL ; // temporary vector
GrB_Vector pi = NULL ; // parent vector
if (v_output == NULL || (A == NULL && AT == NULL))
{
// required output argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
(*v_output) = NULL ;
bool compute_tree = (pi_output != NULL) ;
bool use_vxm_with_A ;
GrB_Index nrows, ncols, nvalA, ignore, nvals ;
if (A == NULL)
{
// only AT is provided
LAGr_Matrix_ncols (&nrows, AT) ;
LAGr_Matrix_nrows (&ncols, AT) ;
LAGr_Matrix_nvals (&nvalA, AT) ;
use_vxm_with_A = false ;
}
else
{
// A is provided. AT may or may not be provided
LAGr_Matrix_nrows (&nrows, A) ;
LAGr_Matrix_ncols (&ncols, A) ;
LAGr_Matrix_nvals (&nvalA, A) ;
use_vxm_with_A = true ;
}
// push/pull requires both A and AT
bool push_pull = (A != NULL && AT != NULL) ;
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_NULL_POINTER) ;
}
//--------------------------------------------------------------------------
// check the format of A and AT
//--------------------------------------------------------------------------
bool csr = true ;
// csr is true if A and AT are known (or assumed) to be in CSR format; if
// false, they are known to be in CSC format.
// This can be tested in SuiteSparse:GraphBLAS. Other libraries can use
// this section for their own library-specific tests, if they have them.
// LAGraph_bfs_pushpull will work just fine if nothing is changed or if the
// following is disabled (even SuiteSparse:GraphBLAS). The push/pull
// behavior will be unpredictable, however, unless the library default
// format is CSR.
#ifdef GxB_SUITESPARSE_GRAPHBLAS
// The CSR vs CSC status can be tested in SuiteSparse:GraphBLAS.
// However, even with SuiteSparse:GraphBLAS, this step is optional.
GxB_Format_Value A_format = -1, AT_format = -1 ;
bool A_csr = true, AT_csr = true ;
if (A != NULL)
{
// A_csr is true if accessing A(i,:) is fast
LAGr_get (A , GxB_FORMAT, &A_format) ;
A_csr = (A_format == GxB_BY_ROW) ;
}
if (AT != NULL)
{
// AT_csr is true if accessing AT(i,:) is fast
LAGr_get (AT, GxB_FORMAT, &AT_format) ;
AT_csr = (AT_format == GxB_BY_ROW) ;
}
// Assume CSR if A(i,:) and AT(i,:) are both fast. If csr is false,
// then the algorithm below will reverse the use of vxm and mxv.
csr = A_csr && AT_csr ;
if (push_pull)
{
// both A and AT are provided. Require they have the same format.
// Either both A(i,:) and AT(i,:) are efficient to access, or both
// A(:,j) and AT(:,j) are efficient to access.
if (A_csr != AT_csr)
{
LAGRAPH_ERROR ("A and AT must be in the same format:\n"
"both GxB_BY_ROW, or both GxB_BY_COL",
GrB_INVALID_VALUE) ;
}
}
else
{
// only A or AT are provided. Refuse to do the pull-only version.
if (A != NULL && A_format == GxB_BY_COL)
{
// this would result in a pull-only BFS ... exceedingly slow
LAGRAPH_ERROR (
"SuiteSparse: AT not provided, so A must be GxB_BY_ROW\n"
"(or provide both A and AT, both in the same format,\n"
"either both GxB_BY_COL or both GxB_BY_ROW)",
GrB_INVALID_VALUE) ;
}
if (AT != NULL && AT_format == GxB_BY_ROW)
{
// this would result in a pull-only BFS ... exceedingly slow
LAGRAPH_ERROR (
"SuiteSparse: A not provided, so AT must be GxB_BY_COL\n"
"(or provide both A and AT, both in the same format,\n"
"either both GxB_BY_COL or both GxB_BY_ROW)",
GrB_INVALID_VALUE) ;
}
}
#endif
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Index n = nrows ;
int nthreads = LAGraph_get_nthreads ( ) ;
nthreads = LAGRAPH_MIN (n / 4096, nthreads) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;
// just traverse from the source node
max_level = (max_level <= 0) ? n : LAGRAPH_MIN (n, max_level) ;
// create an empty vector v
GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ;
LAGr_Vector_new (&v, int_type, n) ;
// make v dense if requested
int64_t vlimit = LAGRAPH_MAX (256, sqrt ((double) n)) ;
if (!vsparse)
{
// v is expected to have many entries, so convert v to dense.
// If the guess is wrong, v can be made dense later on.
LAGr_assign (v, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
GrB_Semiring first_semiring, second_semiring ;
if (compute_tree)
{
// create an integer vector q, and set q(source) to source+1
LAGr_Vector_new (&q, int_type, n) ;
LAGr_Vector_setElement (q, source+1, source) ;
if (n > INT32_MAX)
{
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT64 ;
second_semiring = GxB_ANY_SECOND_INT64 ;
#else
// deterministic, but cannot terminate early
first_semiring = LAGraph_MIN_FIRST_INT64 ;
second_semiring = LAGraph_MIN_SECOND_INT64 ;
#endif
}
else
{
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT32 ;
second_semiring = GxB_ANY_SECOND_INT32 ;
#else
// deterministic, but cannot terminate early
first_semiring = LAGraph_MIN_FIRST_INT32 ;
second_semiring = LAGraph_MIN_SECOND_INT32 ;
#endif
}
// create the empty parent vector
LAGr_Vector_new (&pi, int_type, n) ;
if (!vsparse)
{
// make pi a dense vector of all zeros
LAGr_assign (pi, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// pi (source) = source+1 denotes a root of the BFS tree
LAGr_Vector_setElement (pi, source+1, source) ;
}
else
{
// create a boolean vector q, and set q(source) to true
LAGr_Vector_new (&q, GrB_BOOL, n) ;
LAGr_Vector_setElement (q, true, source) ;
#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
&& ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
// terminates as soon as it finds any pair
first_semiring = GxB_ANY_PAIR_BOOL ;
second_semiring = GxB_ANY_PAIR_BOOL ;
#else
// can terminate early, but requires more data movement internally
first_semiring = LAGraph_LOR_FIRST_BOOL ;
second_semiring = LAGraph_LOR_SECOND_BOOL ;
#endif
}
// average node degree
double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ;
int64_t nvisited = 0 ; // # nodes visited so far
GrB_Index nq = 1 ; // number of nodes in the current level
//--------------------------------------------------------------------------
// BFS traversal and label the nodes
//--------------------------------------------------------------------------
for (int64_t level = 1 ; ; level++)
{
//----------------------------------------------------------------------
// set v to the current level, for all nodes in q
//----------------------------------------------------------------------
// v<q> = level: set v(i) = level for all nodes i in q
LAGr_assign (v, q, NULL, level, GrB_ALL, n, GrB_DESC_S) ;
//----------------------------------------------------------------------
// check if done
//----------------------------------------------------------------------
nvisited += nq ;
if (nq == 0 || nvisited == n || level >= max_level) break ;
//----------------------------------------------------------------------
// check if v should be converted to dense
//----------------------------------------------------------------------
if (vsparse && nvisited > vlimit)
{
// Convert v from sparse to dense to speed up the rest of the work.
// If this case is triggered, it would have been faster to pass in
// vsparse = false on input.
// v <!v> = 0
LAGr_assign (v, v, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ;
LAGr_Vector_nvals (&ignore, v) ;
if (compute_tree)
{
// Convert pi from sparse to dense, to speed up the work.
// pi<!pi> = 0
LAGr_assign (pi, pi, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ;
LAGr_Vector_nvals (&ignore, pi) ;
}
vsparse = false ;
}
//----------------------------------------------------------------------
// select push vs pull
//----------------------------------------------------------------------
if (push_pull)
{
double pushwork = d * nq ;
double expected = (double) n / (double) (nvisited+1) ;
double per_dot = LAGRAPH_MIN (d, expected) ;
double binarysearch = (3 * (1 + log2 ((double) nq))) ;
double pullwork = (n-nvisited) * per_dot * binarysearch ;
use_vxm_with_A = (pushwork < pullwork) ;
if (!csr)
{
// Neither A(i,:) nor AT(i,:) is efficient. Instead, both
// A(:,j) and AT(:,j) are fast (that is, the two matrices
// are in CSC format). Swap the push/pull decision.
use_vxm_with_A = !use_vxm_with_A ;
}
}
//----------------------------------------------------------------------
// q = next level of the BFS
//----------------------------------------------------------------------
if (use_vxm_with_A)
{
// q'<!v> = q'*A
// this is a push step if A is in CSR format; pull if CSC
LAGr_vxm (q, v, NULL, first_semiring, q, A, GrB_DESC_RC) ;
}
else
{
// q<!v> = AT*q
// this is a pull step if AT is in CSR format; push if CSC
LAGr_mxv (q, v, NULL, second_semiring, AT, q, GrB_DESC_RC) ;
}
//----------------------------------------------------------------------
// move to next level
//----------------------------------------------------------------------
if (compute_tree)
{
//------------------------------------------------------------------
// assign parents
//------------------------------------------------------------------
// q(i) currently contains the parent of node i in the tree (off by one
// so it won't have any zero values, for valued mask).
// pi<q> = q
LAGr_assign (pi, q, NULL, q, GrB_ALL, n, GrB_DESC_S) ;
//------------------------------------------------------------------
// replace q with current node numbers
//------------------------------------------------------------------
// TODO this could be a unaryop
// q(i) = i+1 for all entries in q.
#ifdef GxB_SUITESPARSE_GRAPHBLAS
GrB_Index *qi ;
bool jumbled ;
int64_t q_size ;
GrB_Index qi_size, qx_size ;
if (n > INT32_MAX)
{
int64_t *qx ;
#if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,1)
LAGr_Vector_export_CSC (&q, &int_type, &n,
&qi, (void **) (&qx), &qi_size, &qx_size, &nq,
&jumbled, NULL) ;
#elif GxB_IMPLEMENTATION == GxB_VERSION (4,0,0)
LAGr_Vector_export_CSC (&q, &int_type, &n, &q_size, &nq,
&jumbled, &qi, (void **) (&qx), NULL) ;
#else
LAGr_Vector_export (&q, &int_type, &n, &nq, &qi,
(void **) (&qx), NULL) ;
#endif
int nth = LAGRAPH_MIN (nq / (64*1024), nthreads) ;
nth = LAGRAPH_MAX (nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (int64_t k = 0 ; k < nq ; k++)
{
qx [k] = qi [k] + 1 ;
}
#if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,1)
LAGr_Vector_import_CSC (&q, int_type, n,
&qi, (void **) (&qx), qi_size, qx_size, nq,
jumbled, NULL) ;
#elif GxB_IMPLEMENTATION == GxB_VERSION (4,0,0)
LAGr_Vector_import_CSC (&q, int_type, n, q_size, nq,
jumbled, &qi, (void **) (&qx), NULL) ;
#else
LAGr_Vector_import (&q, int_type, n, nq, &qi,
(void **) (&qx), NULL) ;
#endif
}
else
{
int32_t *qx ;
#if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,1)
LAGr_Vector_export_CSC (&q, &int_type, &n,
&qi, (void **) (&qx), &qi_size, &qx_size, &nq,
&jumbled, NULL) ;
#elif GxB_IMPLEMENTATION == GxB_VERSION (4,0,0)
LAGr_Vector_export_CSC (&q, &int_type, &n, &q_size, &nq,
&jumbled, &qi, (void **) (&qx), NULL) ;
#else
LAGr_Vector_export (&q, &int_type, &n, &nq, &qi,
(void **) (&qx), NULL) ;
#endif
int nth = LAGRAPH_MIN (nq / (64*1024), nthreads) ;
nth = LAGRAPH_MAX (nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (int32_t k = 0 ; k < nq ; k++)
{
qx [k] = qi [k] + 1 ;
}
#if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,1)
LAGr_Vector_import_CSC (&q, int_type, n,
&qi, (void **) (&qx), qi_size, qx_size, nq,
jumbled, NULL) ;
#elif GxB_IMPLEMENTATION == GxB_VERSION (4,0,0)
LAGr_Vector_import_CSC (&q, int_type, n, q_size, nq,
jumbled, &qi, (void **) (&qx), NULL) ;
#else
LAGr_Vector_import (&q, int_type, n, nq, &qi,
(void **) (&qx), NULL) ;
#endif
}
#else
// TODO: use extractTuples and build instead
// Or use something like:
// extract tuples into I
// let e = 1:n be created once, in initialization phase
// q<q> = e (I)
fprintf (stderr, "TODO: use extractTuples here\n") ;
abort ( ) ;
#endif
}
else
{
//------------------------------------------------------------------
// count the nodes in the current level
//------------------------------------------------------------------
LAGr_Vector_nvals (&nq, q) ;
}
}
//--------------------------------------------------------------------------
// return the parent vector, if computed
//--------------------------------------------------------------------------
if (compute_tree)
{
(*pi_output) = pi ;
pi = NULL ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
(*v_output) = v ; // return result
v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it
LAGRAPH_FREE_ALL ; // free all workspace (except for result v)
return (GrB_SUCCESS) ;
#endif
}
|
eltwise_layernorm.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <immintrin.h>
/* include c-based dnn library */
#include "../deeplearning/common/dnn_common.h"
#if defined(_OPENMP)
# include <omp.h>
#endif
#define EPS 1e-9
LIBXSMM_INLINE
void sfill_matrix ( float *matrix, unsigned int ld, unsigned int m, unsigned int n )
{
unsigned int i, j;
double dtmp;
if ( ld < m )
{
fprintf(stderr,"Error in sfill_matrix: ld=%u m=%u mismatched!\n",ld,m);
exit(EXIT_FAILURE);
}
for ( j = 1; j <= n; j++ )
{
/* Fill through the leading dimension */
for ( i = 1; i <= ld; i++ )
{
dtmp = 1.0 - 2.0*libxsmm_rng_f64();
matrix [ (j-1)*ld + (i-1) ] = (float) dtmp;
}
}
}
LIBXSMM_INLINE
void naive_layernorm(int m, int n, int ld_in, float *sinp, float *gamma, float *beta, float *sout_ref, float *mean_data_ref, float *rstd_data_ref)
{
int i, j;
#if defined(_OPENMP)
#pragma omp parallel for private(j)
#endif
for (j = 0; j < n; j++) {
float mean_val_ref = 0, rstd_val_ref = 0, scale_ref = 0, bias_ref = 0, gamma_val_ref = 0, beta_val_ref = 0;
mean_data_ref[j] = 0;
rstd_data_ref[j] = 0;
for (i = 0; i < m; i++) {
mean_data_ref[j] += sinp[j*ld_in + i];
rstd_data_ref[j] += sinp[j*ld_in + i] * sinp[j*ld_in + i];
}
mean_val_ref = mean_data_ref[j]/m;
rstd_val_ref = (rstd_data_ref[j]/m)-mean_val_ref*mean_val_ref;
rstd_val_ref = 1/((float)sqrt(rstd_val_ref));
mean_data_ref[j] = mean_val_ref;
rstd_data_ref[j] = rstd_val_ref;
scale_ref = rstd_val_ref;
bias_ref = -1.f * rstd_val_ref * mean_val_ref;
for (i = 0; i < m; i++) {
gamma_val_ref = gamma[i];
beta_val_ref = beta[i];
sout_ref[j*ld_in+i] = (sinp[j*ld_in+i] * scale_ref + bias_ref) * gamma_val_ref + beta_val_ref;
}
}
}
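/* Restated as formulas (a summary of the reference code above, per column j):
mean_j = (1/m) * sum_i x(i,j)
rstd_j = 1 / sqrt( (1/m) * sum_i x(i,j)^2 - mean_j^2 )
out(i,j) = (x(i,j) - mean_j) * rstd_j * gamma(i) + beta(i)
i.e. standard layernorm with per-feature affine parameters gamma and beta. */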
LIBXSMM_INLINE
void naive_layernorm_bwd(int m, int n, int ld_in, float *dY, float *X, float *mean, float *rstd, float *gamma, float *dX, float *dgamma, float *dbeta)
{
float a, b, c, ds, db, scale = (float)(1.0 / m);
int i, j;
for (i = 0; i < m; i++) {
dgamma[i] = 0;
dbeta[i] = 0;
}
for (j = 0; j < n; j++) {
a = rstd[j];
b = -1.f * a * mean[j];
ds = 0;
db = 0;
for (i = 0; i < m; i++) {
dgamma[i] += dY[j*ld_in+i] * (a * X[j*ld_in+i] + b);
dbeta[i] += dY[j*ld_in+i];
ds += dY[j*ld_in+i] * X[j*ld_in+i] * gamma[i];
db += dY[j*ld_in+i] * gamma[i];
}
b = (db * mean[j] - ds) * a * a * a * scale;
c = -1.f * b * mean[j] - db * a * scale;
for (i = 0; i < m; i++) {
dX[j*ld_in+i] = a * dY[j*ld_in+i] * gamma[i] + b * X[j*ld_in+i] + c;
}
}
}
LIBXSMM_INLINE
void optimized_layernorm(int m, int n, int ld_in, float *sinp, float *gamma, float *beta, float *sout, float *mean_data, float *rstd_data, libxsmm_meltwfunction_reduce reduce_kernel, libxsmm_meltwfunction_scale scalemean_kernel, libxsmm_meltwfunction_scale scaleout_kernel, float * bias_aux)
{
int i;
float reverse_m = (float)(1.0 / m);
#if defined(__AVX512F__)
__m512 minus_ones = _mm512_set1_ps(-1.f);
#endif
libxsmm_meltw_reduce_param reduce_params;
libxsmm_meltw_scale_param scalemean_params;
libxsmm_meltw_scale_param scaleout_params;
reduce_params.in_ptr = sinp;
reduce_params.out_ptr_0 = mean_data;
reduce_params.out_ptr_1 = rstd_data;
reduce_kernel(&reduce_params);
scalemean_params.in_ptr = mean_data;
scalemean_params.out_ptr = mean_data;
scalemean_params.scale_vals_ptr = &reverse_m;
scalemean_kernel(&scalemean_params);
scalemean_params.in_ptr = rstd_data;
scalemean_params.out_ptr = rstd_data;
scalemean_kernel(&scalemean_params);
/* Calculate rstd and auxiliary bias vectors */
#if defined(__AVX512F__)
for (i = 0; i < n-15; i+= 16) {
__m512 vrstd = _mm512_loadu_ps(rstd_data+i);
__m512 vmean = _mm512_loadu_ps(mean_data+i);
vrstd = _mm512_rsqrt14_ps(_mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
_mm512_storeu_ps(rstd_data+i, vrstd);
_mm512_storeu_ps(bias_aux+i, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
}
if (i < n) {
int rem = n - i;
__mmask16 mask = (1 << rem) - 1;
__m512 vrstd = _mm512_maskz_loadu_ps(mask, rstd_data+i);
__m512 vmean = _mm512_maskz_loadu_ps(mask, mean_data+i);
vrstd = _mm512_maskz_rsqrt14_ps(mask, _mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
_mm512_mask_storeu_ps(rstd_data+i, mask, vrstd );
_mm512_mask_storeu_ps(bias_aux+i, mask, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
}
#else
for (i = 0; i < n; i++) {
rstd_data[i] = (float)(1.0 / sqrt(rstd_data[i] - mean_data[i] * mean_data[i]));
bias_aux[i] = -1.f * mean_data[i] * rstd_data[i];
}
#endif
scaleout_params.in_ptr = sinp;
scaleout_params.out_ptr = sout;
scaleout_params.scale_vals_ptr = rstd_data;
scaleout_params.bias_vals_ptr = bias_aux;
scaleout_params.scale_vals_ptr2 = gamma;
scaleout_params.bias_vals_ptr2 = beta;
scaleout_kernel(&scaleout_params);
}
LIBXSMM_INLINE
void optimized_blocked_layernorm(int m, int n, int bm, int bn, float *data_in, float *gamma_data, float *beta_data, float *mean_data, float *rstd_data)
{
int ld = bm, ld_vector = bn;
libxsmm_meltw_redu_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_NONE;
libxsmm_meltwfunction_reduce reduce_rows_kernel, reduce_cols_kernel;
libxsmm_meltw_scal_flags jit_scale_flags = 0;
libxsmm_meltwfunction_scale scale_kernel;
libxsmm_meltw_scal_flags jit_scaleout_flags = 0;
libxsmm_meltwfunction_scale scaleout_kernel;
#if defined(_OPENMP)
int threads = omp_get_max_threads(); /* number of threads */
#else
int threads = 1; /* number of threads */
#endif
int nBlocks = n/bn;
int mBlocks = m/bm;
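/* note: assumes n % bn == 0 and m % bm == 0; remainder blocks are not handled */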
float *const scratch = (float*)libxsmm_aligned_scratch((2 * n * mBlocks + n) * sizeof(float), 0/*auto-alignment*/);
float *sums_ptr = scratch;
float *sums_sq_ptr = scratch + n * mBlocks;
float *aux_bias_ptr = scratch + 2 * n * mBlocks;
LIBXSMM_VLA_DECL(3, float, sums, sums_ptr, mBlocks, bn);
LIBXSMM_VLA_DECL(3, float, sums_sq, sums_sq_ptr, mBlocks, bn);
LIBXSMM_VLA_DECL(2, float, mean, mean_data, bn);
LIBXSMM_VLA_DECL(2, float, rstd, rstd_data, bn);
LIBXSMM_VLA_DECL(2, float, gamma, gamma_data, bm);
LIBXSMM_VLA_DECL(2, float, beta, beta_data, bm);
LIBXSMM_VLA_DECL(2, float, aux_bias, aux_bias_ptr, bn);
LIBXSMM_VLA_DECL(4, float, X, data_in, mBlocks, bn, bm);
/*libxsmm_barrier *barrier;*/
/* Generate JITED kernels for optimized code */
jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_ROWS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS | LIBXSMM_MELTW_FLAG_REDUCE_ELTS_SQUARED;
reduce_rows_kernel = libxsmm_dispatch_meltw_reduce(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, 0);
jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_COLS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS;
reduce_cols_kernel = libxsmm_dispatch_meltw_reduce(bn, mBlocks, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, 0);
jit_scale_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS | LIBXSMM_MELTW_FLAG_SCALE_MULT;
scale_kernel = libxsmm_dispatch_meltw_scale(bn, 1, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scale_flags, 0);
jit_scaleout_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS_COLS | LIBXSMM_MELTW_FLAG_SCALE_MULT | LIBXSMM_MELTW_FLAG_SCALE_ADD_BIAS;
scaleout_kernel = libxsmm_dispatch_meltw_scale(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scaleout_flags, 0);
#if defined(_OPENMP)
# pragma omp parallel
#endif
{
int i, imin, im, in;
float reverse_m = (float)(1.0 / m);
#if defined(__AVX512F__)
__m512 minus_ones = _mm512_set1_ps(-1.f);
#endif
#if defined(_OPENMP)
const int ltid = omp_get_thread_num();
#else
const int ltid = 0;
#endif
const int work_mn = nBlocks * mBlocks;
const int chunksize_mn = (work_mn % threads == 0) ? (work_mn /threads) : ((work_mn / threads) + 1);
const int thr_begin_mn = (ltid * chunksize_mn < work_mn) ? (ltid * chunksize_mn) : work_mn;
const int thr_end_mn = ((ltid + 1) * chunksize_mn < work_mn) ? ((ltid + 1) * chunksize_mn) : work_mn;
const int work_n = nBlocks;
const int chunksize_n = (work_n % threads == 0) ? (work_n /threads) : ((work_n / threads) + 1);
const int thr_begin_n = (ltid * chunksize_n < work_n) ? (ltid * chunksize_n) : work_n;
const int thr_end_n = ((ltid + 1) * chunksize_n < work_n) ? ((ltid + 1) * chunksize_n) : work_n;
libxsmm_meltw_reduce_param reduce_rows_params, reduce_cols_params;
libxsmm_meltw_scale_param scale_params;
libxsmm_meltw_scale_param scaleout_params;
/*libxsmm_barrier_init(barrier, ltid);*/
for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
in = imin / mBlocks;
im = imin % mBlocks;
reduce_rows_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm);
reduce_rows_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, sums, in, im, 0, mBlocks, bn);
reduce_rows_params.out_ptr_1 = &LIBXSMM_VLA_ACCESS(3, sums_sq, in, im, 0, mBlocks, bn);
reduce_rows_kernel(&reduce_rows_params);
}
#pragma omp barrier
/*libxsmm_barrier_wait(barrier, ltid);*/
scale_params.scale_vals_ptr = &reverse_m;
for (in = thr_begin_n; in < thr_end_n; in++) {
reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, sums, in, 0, 0, mBlocks, bn);
reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn);
reduce_cols_kernel(&reduce_cols_params);
scale_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn);
scale_params.out_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn);
scale_kernel(&scale_params);
reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, sums_sq, in, 0, 0, mBlocks, bn);
reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
reduce_cols_kernel(&reduce_cols_params);
scale_params.in_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
scale_params.out_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
scale_kernel(&scale_params);
}
#pragma omp barrier
/*libxsmm_barrier_wait(barrier, ltid);*/
/* Calculate rstd and auxiliary bias vectors*/
for (in = thr_begin_n; in < thr_end_n; in++) {
float *rstd_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
float *mean_ptr = &LIBXSMM_VLA_ACCESS(2, mean, in, 0, bn);
float *bias_ptr = &LIBXSMM_VLA_ACCESS(2, aux_bias, in, 0, bn);
#if defined(__AVX512F__)
for (i = 0; i < bn-15; i+= 16) {
__m512 vrstd = _mm512_loadu_ps(rstd_ptr+i);
__m512 vmean = _mm512_loadu_ps(mean_ptr+i);
vrstd = _mm512_rsqrt14_ps(_mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
_mm512_storeu_ps(rstd_ptr+i, vrstd);
_mm512_storeu_ps(bias_ptr+i, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
}
if (i < bn) {
int rem = bn - i;
__mmask16 mask = (1 << rem) - 1;
__m512 vrstd = _mm512_maskz_loadu_ps(mask, rstd_ptr+i);
__m512 vmean = _mm512_maskz_loadu_ps(mask, mean_ptr+i);
vrstd = _mm512_maskz_rsqrt14_ps(mask, _mm512_sub_ps(vrstd, _mm512_mul_ps(vmean, vmean)));
_mm512_mask_storeu_ps(rstd_ptr+i, mask, vrstd );
_mm512_mask_storeu_ps(bias_ptr+i, mask, _mm512_mul_ps(minus_ones, _mm512_mul_ps(vmean, vrstd)));
}
#else
for (i = 0; i < bn; i++) {
rstd_ptr[i] = (float)(1.0 / sqrt(rstd_ptr[i] - mean_ptr[i] * mean_ptr[i]));
bias_ptr[i] = -1.f * mean_ptr[i] * rstd_ptr[i]; /* bias is -mean*rstd, matching the AVX-512 path above */
}
#endif
}
#pragma omp barrier
/*libxsmm_barrier_wait(barrier, ltid);*/
for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
in = imin / mBlocks;
im = imin % mBlocks;
scaleout_params.in_ptr = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm);
scaleout_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, X, in, im, 0, 0, mBlocks, bn, bm);
scaleout_params.scale_vals_ptr = &LIBXSMM_VLA_ACCESS(2, rstd, in, 0, bn);
scaleout_params.bias_vals_ptr = &LIBXSMM_VLA_ACCESS(2, aux_bias, in, 0, bn);
scaleout_params.scale_vals_ptr2 = &LIBXSMM_VLA_ACCESS(2, gamma, im, 0, bm);
scaleout_params.bias_vals_ptr2 = &LIBXSMM_VLA_ACCESS(2, beta, im, 0, bm);
scaleout_kernel(&scaleout_params);
}
#pragma omp barrier
/*libxsmm_barrier_wait(barrier, ltid);*/
}
libxsmm_free(scratch);
}
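/*
 * Blocked backward pass: per block, the products gamma*dY, gamma*dY*X and
 * dY*xhat are staged in thread-local scratch and reduced with JITed kernels;
 * a second reduction level yields db/ds (per normalized row) and
 * dbeta/dgamma (per column of m), the auxiliary b/c vectors overwrite db/ds,
 * and finally dX = a*dY*gamma + b*X + c is computed per block.
 */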
LIBXSMM_INLINE
void optimized_blocked_layernorm_bwd(int m, int n, int bm, int bn, float *_dY, float *_X, float *_mean, float *_rstd, float *_gamma, float *_dX, float *_dgamma, float *_dbeta)
{
int ld = bm, ld_vector = bn;
libxsmm_meltw_redu_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_NONE;
libxsmm_meltwfunction_reduce reduce_rows_kernel, reduce_cols_kernel, reduce_cols_kernel2, reduce_cols_kernel3;
int nBlocks = n/bn;
int mBlocks = m/bm;
float *const scratch = (float*)libxsmm_aligned_scratch((2 * n * mBlocks + 2 * m * nBlocks + 2 * n) * sizeof(float), 0/*auto-alignment*/);
float *dgamma_aux_ptr = scratch;
float *dbeta_aux_ptr = scratch + m * nBlocks;
float *ds_aux_ptr = scratch + 2 * m * nBlocks;
float *db_aux_ptr = scratch + 2 * m * nBlocks + n * mBlocks;
float *db_ptr = scratch + 2 * m * nBlocks + 2 * n * mBlocks;
float *ds_ptr = scratch + 2 * m * nBlocks + 2 * n * mBlocks + n;
LIBXSMM_VLA_DECL(3, float, ds_aux, ds_aux_ptr, mBlocks, bn);
LIBXSMM_VLA_DECL(3, float, db_aux, db_aux_ptr, mBlocks, bn);
LIBXSMM_VLA_DECL(3, float, dgamma_aux, dgamma_aux_ptr, nBlocks, bm);
LIBXSMM_VLA_DECL(3, float, dbeta_aux, dbeta_aux_ptr, nBlocks, bm);
LIBXSMM_VLA_DECL(4, float, dY, _dY, mBlocks, bn, bm);
LIBXSMM_VLA_DECL(4, float, X, _X, mBlocks, bn, bm);
LIBXSMM_VLA_DECL(4, float, dX, _dX, mBlocks, bn, bm);
LIBXSMM_VLA_DECL(2, float, mean, _mean, bn);
LIBXSMM_VLA_DECL(2, float, rstd, _rstd, bn);
LIBXSMM_VLA_DECL(2, float, gamma, _gamma, bm);
LIBXSMM_VLA_DECL(2, float, dgamma, _dgamma, bm);
LIBXSMM_VLA_DECL(2, float, dbeta, _dbeta, bm);
LIBXSMM_VLA_DECL(2, float, ds, ds_ptr, bn);
LIBXSMM_VLA_DECL(2, float, db, db_ptr, bn);
#if defined(_OPENMP)
int threads = omp_get_max_threads(); /* number of threads */
#else
int threads = 1; /* number of threads */
#endif
/* Generate JITED kernels for optimized code */
jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_ROWS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS;
reduce_rows_kernel = libxsmm_dispatch_meltw_reduce(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, 0);
jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_COLS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS;
reduce_cols_kernel = libxsmm_dispatch_meltw_reduce(bm, bn, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, 0);
reduce_cols_kernel2 = libxsmm_dispatch_meltw_reduce(bm, nBlocks, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, 0);
reduce_cols_kernel3 = libxsmm_dispatch_meltw_reduce(bn, mBlocks, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, 0);
#if !defined(_OPENMP)
float *const aux = (float*)libxsmm_aligned_scratch((3 * bm * bn) * sizeof(float), 0/*auto-alignment*/);
#else
float *const aux = (float*)libxsmm_aligned_scratch((3 * bm * bn) * sizeof(float) * omp_get_max_threads(), 0/*auto-alignment*/);
# pragma omp parallel
#endif
{
int imin, im, in, ii, jj;
float reverse_m = (float)(1.0 / m);
#if defined(__AVX512F__)
__m512 minus_ones = _mm512_set1_ps(-1.f);
__m512 scale = _mm512_set1_ps(reverse_m);
#endif
#if defined(_OPENMP)
const int ltid = omp_get_thread_num();
#else
const int ltid = 0;
#endif
const int work_mn = nBlocks * mBlocks;
const int chunksize_mn = (work_mn % threads == 0) ? (work_mn /threads) : ((work_mn / threads) + 1);
const int thr_begin_mn = (ltid * chunksize_mn < work_mn) ? (ltid * chunksize_mn) : work_mn;
const int thr_end_mn = ((ltid + 1) * chunksize_mn < work_mn) ? ((ltid + 1) * chunksize_mn) : work_mn;
const int work_n = nBlocks;
const int chunksize_n = (work_n % threads == 0) ? (work_n /threads) : ((work_n / threads) + 1);
const int thr_begin_n = (ltid * chunksize_n < work_n) ? (ltid * chunksize_n) : work_n;
const int thr_end_n = ((ltid + 1) * chunksize_n < work_n) ? ((ltid + 1) * chunksize_n) : work_n;
const int work_m = mBlocks;
const int chunksize_m = (work_m % threads == 0) ? (work_m /threads) : ((work_m / threads) + 1);
const int thr_begin_m = (ltid * chunksize_m < work_m) ? (ltid * chunksize_m) : work_m;
const int thr_end_m = ((ltid + 1) * chunksize_m < work_m) ? ((ltid + 1) * chunksize_m) : work_m;
libxsmm_meltw_reduce_param reduce_rows_params, reduce_cols_params;
for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
float *const tmp = aux + bm*bn * (ltid*3 + 0); /* aux block for db */
float *const tmp2 = aux + bm*bn * (ltid*3 + 1); /* aux block for ds */
float *const tmp3 = aux + bm*bn * (ltid*3 + 2); /* aux block for dgamma */
in = imin / mBlocks;
im = imin % mBlocks;
#if defined(__AVX512F__)
/* Prepare blocks for reductions */
for (jj = 0; jj < bn; jj++) {
__m512 vrstd = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, rstd, in, jj, bn));
__m512 vmean = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, mean, in, jj, bn));
__m512 vb = _mm512_mul_ps(vrstd, _mm512_mul_ps(minus_ones, vmean));
for (ii = 0; ii < bm-15; ii+=16) {
__m512 vgamma = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
__m512 vdY = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
__m512 vX = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
__m512 vaux = _mm512_fmadd_ps(vrstd, vX, vb);
__m512 vtmp = _mm512_mul_ps(vgamma, vdY);
_mm512_storeu_ps((float*)tmp+jj*bm+ii, vtmp);
_mm512_storeu_ps((float*)tmp2+jj*bm+ii, _mm512_mul_ps(vtmp, vX));
_mm512_storeu_ps((float*)tmp3+jj*bm+ii, _mm512_mul_ps(vdY, vaux));
}
if (ii < bm) {
int rem = bm - ii;
__mmask16 mask = (1 << rem) - 1;
__m512 vgamma = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
__m512 vdY = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
__m512 vX = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
__m512 vaux = _mm512_fmadd_ps(vrstd, vX, vb);
__m512 vtmp = _mm512_mul_ps(vgamma, vdY);
_mm512_mask_storeu_ps((float*)tmp+jj*bm+ii, mask, vtmp);
_mm512_mask_storeu_ps((float*)tmp2+jj*bm+ii, mask, _mm512_mul_ps(vtmp, vX));
_mm512_mask_storeu_ps((float*)tmp3+jj*bm+ii, mask, _mm512_mul_ps(vdY, vaux));
}
}
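#else
/* Scalar fallback (added sketch): without AVX-512 the tmp blocks were left
   uninitialized before the reductions below; this mirrors the vectorized
   preparation above. */
for (jj = 0; jj < bn; jj++) {
const float a = LIBXSMM_VLA_ACCESS(2, rstd, in, jj, bn);
const float b = -1.f * a * LIBXSMM_VLA_ACCESS(2, mean, in, jj, bn);
for (ii = 0; ii < bm; ii++) {
const float g = LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm);
const float dy = LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm);
const float x = LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm);
tmp[jj*bm+ii] = g * dy; /* feeds the db reduction */
tmp2[jj*bm+ii] = g * dy * x; /* feeds the ds reduction */
tmp3[jj*bm+ii] = dy * (a * x + b); /* feeds the dgamma reduction */
}
}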
#endif
/* Now perform reductions */
reduce_rows_params.in_ptr = tmp;
reduce_rows_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, db_aux, in, im, 0, mBlocks, bn);
reduce_rows_kernel(&reduce_rows_params);
reduce_rows_params.in_ptr = tmp2;
reduce_rows_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, ds_aux, in, im, 0, mBlocks, bn);
reduce_rows_kernel(&reduce_rows_params);
reduce_cols_params.in_ptr = (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, 0, 0, mBlocks, bn, bm);
reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, dbeta_aux, im, in, 0, nBlocks, bm);
reduce_cols_kernel(&reduce_cols_params);
reduce_cols_params.in_ptr = tmp3;
reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(3, dgamma_aux, im, in, 0, nBlocks, bm);
reduce_cols_kernel(&reduce_cols_params);
}
#pragma omp barrier
/* Second level of reductions */
for (in = thr_begin_n; in < thr_end_n; in++) {
reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, db_aux, in, 0, 0, mBlocks, bn);
reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, db, in, 0, bn);
reduce_cols_kernel3(&reduce_cols_params);
reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, ds_aux, in, 0, 0, mBlocks, bn);
reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, ds, in, 0, bn);
reduce_cols_kernel3(&reduce_cols_params);
}
for (im = thr_begin_m; im < thr_end_m; im++) {
reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, dbeta_aux, im, 0, 0, nBlocks, bm);
reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, dbeta, im, 0, bm);
reduce_cols_kernel2(&reduce_cols_params);
reduce_cols_params.in_ptr = &LIBXSMM_VLA_ACCESS(3, dgamma_aux, im, 0, 0, nBlocks, bm);
reduce_cols_params.out_ptr_0 = &LIBXSMM_VLA_ACCESS(2, dgamma, im, 0, bm);
reduce_cols_kernel2(&reduce_cols_params);
}
#pragma omp barrier
/* Calculate auxiliary b/c vectors -- overwritten on db/ds */
for (in = thr_begin_n; in < thr_end_n; in++) {
#if defined(__AVX512F__)
for (ii = 0; ii < bn-15; ii+=16) {
__m512 vmean = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, mean, in, ii, bn));
__m512 vrstd = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, rstd, in, ii, bn));
__m512 vdb = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn));
__m512 vds = _mm512_loadu_ps(&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn));
__m512 ascale = _mm512_mul_ps(vrstd, scale);
__m512 vrstd3 = _mm512_mul_ps(_mm512_mul_ps(vrstd, vrstd), ascale);
__m512 vb = _mm512_mul_ps(_mm512_fmsub_ps(vdb, vmean, vds), vrstd3);
__m512 vc = _mm512_sub_ps(_mm512_mul_ps(_mm512_mul_ps(minus_ones, vb), vmean), _mm512_mul_ps(vdb, ascale));
_mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn), vb);
_mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn), vc);
}
if (ii < bn) {
int rem = bn - ii;
__mmask16 mask = (1 << rem) - 1;
__m512 vmean = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, mean, in, ii, bn));
__m512 vrstd = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, rstd, in, ii, bn));
__m512 vdb = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, db, in, ii, bn));
__m512 vds = _mm512_maskz_loadu_ps(mask, &LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn));
__m512 ascale = _mm512_mul_ps(vrstd, scale);
__m512 vrstd3 = _mm512_mul_ps(_mm512_mul_ps(vrstd, vrstd), ascale);
__m512 vb = _mm512_mul_ps(_mm512_fmsub_ps(vdb, vmean, vds), vrstd3);
__m512 vc = _mm512_sub_ps(_mm512_mul_ps(_mm512_mul_ps(minus_ones, vb), vmean), _mm512_mul_ps(vdb, ascale));
_mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, db, in, ii, bn), mask, vb);
_mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn), mask, vc);
}
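#else
/* Scalar fallback (added sketch): forms the same b/c auxiliary vectors as
   the vectorized code above, overwriting db/ds in place. */
for (ii = 0; ii < bn; ii++) {
const float a = LIBXSMM_VLA_ACCESS(2, rstd, in, ii, bn);
const float mu = LIBXSMM_VLA_ACCESS(2, mean, in, ii, bn);
const float dbv = LIBXSMM_VLA_ACCESS(2, db, in, ii, bn);
const float dsv = LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn);
const float bv = (dbv * mu - dsv) * a * a * a * reverse_m;
LIBXSMM_VLA_ACCESS(2, db, in, ii, bn) = bv;
LIBXSMM_VLA_ACCESS(2, ds, in, ii, bn) = -1.f * bv * mu - dbv * a * reverse_m;
}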
#endif
}
#pragma omp barrier
/* Final computation of dX */
for (imin = thr_begin_mn; imin < thr_end_mn; imin++) {
in = imin / mBlocks;
im = imin % mBlocks;
#if defined(__AVX512F__)
for (jj = 0; jj < bn; jj++) {
__m512 va = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, rstd, in, jj, bn));
__m512 vb = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, db, in, jj, bn));
__m512 vc = _mm512_set1_ps(LIBXSMM_VLA_ACCESS(2, ds, in, jj, bn));
for (ii = 0; ii < bm-15; ii+=16) {
__m512 vgamma = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
__m512 vdY = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
__m512 vX = _mm512_loadu_ps((float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
__m512 vaux1 = _mm512_fmadd_ps(vb, vX, vc);
__m512 vaux2 = _mm512_mul_ps(va, _mm512_mul_ps(vdY, vgamma));
_mm512_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dX, in, im, jj, ii, mBlocks, bn, bm), _mm512_add_ps(vaux1, vaux2));
}
if (ii < bm) {
int rem = bm - ii;
__mmask16 mask = (1 << rem) - 1;
__m512 vgamma = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm));
__m512 vdY = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm));
__m512 vX = _mm512_maskz_loadu_ps(mask, (float*)&LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm));
__m512 vaux1 = _mm512_fmadd_ps(vb, vX, vc);
__m512 vaux2 = _mm512_mul_ps(va, _mm512_mul_ps(vdY, vgamma));
_mm512_mask_storeu_ps((float*)&LIBXSMM_VLA_ACCESS(4, dX, in, im, jj, ii, mBlocks, bn, bm), mask, _mm512_add_ps(vaux1, vaux2));
}
}
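#else
/* Scalar fallback (added sketch): dX = a*dY*gamma + b*X + c, matching the
   vectorized loop above. */
for (jj = 0; jj < bn; jj++) {
const float a = LIBXSMM_VLA_ACCESS(2, rstd, in, jj, bn);
const float bv = LIBXSMM_VLA_ACCESS(2, db, in, jj, bn);
const float cv = LIBXSMM_VLA_ACCESS(2, ds, in, jj, bn);
for (ii = 0; ii < bm; ii++) {
LIBXSMM_VLA_ACCESS(4, dX, in, im, jj, ii, mBlocks, bn, bm) =
a * LIBXSMM_VLA_ACCESS(4, dY, in, im, jj, ii, mBlocks, bn, bm) * LIBXSMM_VLA_ACCESS(2, gamma, im, ii, bm)
+ bv * LIBXSMM_VLA_ACCESS(4, X, in, im, jj, ii, mBlocks, bn, bm) + cv;
}
}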
#endif
}
#pragma omp barrier
}
libxsmm_free(scratch);
libxsmm_free(aux);
}
int main(int argc, char* argv[])
{
unsigned int m = 64, n = 64, iters = 10000, k = 0;
libxsmm_blasint ld_in = 64, ld_vector = 64, block_size = 64;
float *sinp, *gamma, *beta, *sout, *sout_nc, *mean_data, *rstd_data, *sout_ref, *mean_data_ref, *rstd_data_ref, *bias_aux;
float *dY_ref, *X_ref, *mean_ref, *rstd_ref, *gamma_ref, *dX_ref, *dgamma_ref, *dbeta_ref;
float *dY_bwd, *X_bwd, *dX_bwd, *dgamma_bwd, *dbeta_bwd, *dX_bwd_nc;
libxsmm_matdiff_info norms_out, norms_mean, norms_rstd, norms_dx, norms_dbeta, norms_dgamma;
unsigned long long l_start, l_end;
double l_total = 0, l_total2 = 0;
libxsmm_meltw_redu_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_NONE;
libxsmm_meltwfunction_reduce reduce_kernel;
libxsmm_meltw_scal_flags jit_scalemean_flags = 0;
libxsmm_meltwfunction_scale scalemean_kernel;
libxsmm_meltw_scal_flags jit_scaleout_flags = 0;
libxsmm_meltwfunction_scale scaleout_kernel;
libxsmm_init();
libxsmm_matdiff_clear(&norms_out);
libxsmm_matdiff_clear(&norms_mean);
libxsmm_matdiff_clear(&norms_rstd);
libxsmm_matdiff_clear(&norms_dx);
libxsmm_matdiff_clear(&norms_dbeta);
libxsmm_matdiff_clear(&norms_dgamma);
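/* optional command-line arguments: m n iters block_size */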
if ( argc > 1 ) m = atoi(argv[1]);
if ( argc > 2 ) n = atoi(argv[2]);
if ( argc > 3 ) iters = atoi(argv[3]);
if ( argc > 4 ) block_size = atoi(argv[4]);
ld_in = m;
n = LIBXSMM_MAX(n,1);
ld_vector = n;
ld_in = LIBXSMM_MAX(ld_in,(libxsmm_blasint)m);
/* Allocate arrays */
sinp = (float*) malloc(ld_in*n*sizeof(float));
gamma = (float*) malloc(m*sizeof(float) );
beta = (float*) malloc(m*sizeof(float) );
sout = (float*) malloc(ld_in*n*sizeof(float) );
sout_nc = (float*) malloc(ld_in*n*sizeof(float) );
mean_data = (float*) malloc(n*sizeof(float) );
rstd_data = (float*) malloc(n*sizeof(float) );
dY_ref = (float*) malloc(m*n*sizeof(float));
dY_bwd = (float*) malloc(m*n*sizeof(float));
X_ref = (float*) malloc(m*n*sizeof(float));
X_bwd = (float*) malloc(m*n*sizeof(float));
mean_ref = (float*) malloc(n*sizeof(float));
rstd_ref = (float*) malloc(n*sizeof(float));
gamma_ref = (float*) malloc(m*sizeof(float));
dX_ref = (float*) malloc(m*n*sizeof(float));
dX_bwd = (float*) malloc(m*n*sizeof(float));
dX_bwd_nc = (float*) malloc(m*n*sizeof(float));
dgamma_ref= (float*) malloc(m*sizeof(float));
dgamma_bwd= (float*) malloc(m*sizeof(float));
dbeta_ref = (float*) malloc(m*sizeof(float));
dbeta_bwd = (float*) malloc(m*sizeof(float));
/* Allocate reference arrays */
mean_data_ref = (float*) malloc(n*sizeof(float) );
rstd_data_ref = (float*) malloc(n*sizeof(float) );
sout_ref = (float*) malloc(ld_in*n*sizeof(float) );
/* Allocate auxiliary arrays for optimized version */
bias_aux = (float*) malloc(n*sizeof(float) );
/* Fill matrices with random data */
sfill_matrix ( sinp, ld_in, m, n );
sfill_matrix ( gamma, ld_in, m, 1 );
sfill_matrix ( beta, ld_in, m, 1 );
sfill_matrix ( dY_ref, ld_in, m, n );
matrix_copy_NC_to_NCNC( dY_ref, dY_bwd, 1, n, m, block_size, block_size );
sfill_matrix ( X_ref, ld_in, m, n );
matrix_copy_NC_to_NCNC( X_ref, X_bwd, 1, n, m, block_size, block_size );
sfill_matrix ( mean_ref, n, n, 1 );
sfill_matrix ( rstd_ref, n, n, 1 );
sfill_matrix ( gamma_ref, m, m, 1 );
/* Calculate reference results... */
naive_layernorm(m, n, ld_in, sinp, gamma, beta, sout_ref, mean_data_ref, rstd_data_ref);
naive_layernorm_bwd(m, n, ld_in, dY_ref, X_ref, mean_ref, rstd_ref, gamma_ref, dX_ref, dgamma_ref, dbeta_ref);
#if 0
/* Generate JITED kernels for optimized code */
jit_reduce_flags = LIBXSMM_MELTW_FLAG_REDUCE_ROWS | LIBXSMM_MELTW_FLAG_REDUCE_OP_ADD | LIBXSMM_MELTW_FLAG_REDUCE_ELTS | LIBXSMM_MELTW_FLAG_REDUCE_ELTS_SQUARED;
printf("JITing reduce kernel... \n");
reduce_kernel = libxsmm_dispatch_meltw_reduce(m, n, &ld_in, &ld_in, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, 0);
jit_scalemean_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS | LIBXSMM_MELTW_FLAG_SCALE_MULT;
printf("JITing mean-scale kernel... \n");
scalemean_kernel = libxsmm_dispatch_meltw_scale(n, 1, &ld_vector, &ld_vector, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scalemean_flags, 0);
jit_scaleout_flags = LIBXSMM_MELTW_FLAG_SCALE_ROWS_COLS | LIBXSMM_MELTW_FLAG_SCALE_MULT | LIBXSMM_MELTW_FLAG_SCALE_ADD_BIAS;
printf("JITing scaling kernel for output... \n");
scaleout_kernel = libxsmm_dispatch_meltw_scale(m, n, &ld_in, &ld_in, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_scaleout_flags, 0);
#endif
/* Calculate blocked results... */
#if 0
optimized_layernorm(m, n, ld_in, sinp, gamma, beta, sout, mean_data, rstd_data, reduce_kernel, scalemean_kernel, scaleout_kernel, bias_aux);
#else
matrix_copy_NC_to_NCNC( sinp, sout, 1, n, m, block_size, block_size );
optimized_blocked_layernorm(m, n, block_size, block_size, sout, gamma, beta, mean_data, rstd_data);
matrix_copy_NCNC_to_NC( sout, sout_nc, 1, n, m, block_size, block_size );
optimized_blocked_layernorm_bwd(m, n, block_size, block_size, dY_bwd, X_bwd, mean_ref, rstd_ref, gamma_ref, dX_bwd, dgamma_bwd, dbeta_bwd);
matrix_copy_NCNC_to_NC( dX_bwd, dX_bwd_nc, 1, n, m, block_size, block_size );
#endif
/* compare */
printf("##########################################\n");
printf("# Correctness FWD - Output #\n");
printf("##########################################\n");
#if 0
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, ld_in*n, 1, sout_ref, sout, 0, 0);
#else
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, ld_in*n, 1, sout_ref, sout_nc, 0, 0);
#endif
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness FWD - Mean #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_mean, LIBXSMM_DATATYPE_F32, n, 1, mean_data_ref, mean_data, 0, 0);
printf("L1 reference : %.25g\n", norms_mean.l1_ref);
printf("L1 test : %.25g\n", norms_mean.l1_tst);
printf("L2 abs.error : %.24f\n", norms_mean.l2_abs);
printf("L2 rel.error : %.24f\n", norms_mean.l2_rel);
printf("Linf abs.error: %.24f\n", norms_mean.linf_abs);
printf("Linf rel.error: %.24f\n", norms_mean.linf_rel);
printf("Check-norm : %.24f\n\n", norms_mean.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness FWD - Rstd #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_rstd, LIBXSMM_DATATYPE_F32, n, 1, rstd_data_ref, rstd_data, 0, 0);
printf("L1 reference : %.25g\n", norms_rstd.l1_ref);
printf("L1 test : %.25g\n", norms_rstd.l1_tst);
printf("L2 abs.error : %.24f\n", norms_rstd.l2_abs);
printf("L2 rel.error : %.24f\n", norms_rstd.l2_rel);
printf("Linf abs.error: %.24f\n", norms_rstd.linf_abs);
printf("Linf rel.error: %.24f\n", norms_rstd.linf_rel);
printf("Check-norm : %.24f\n\n", norms_rstd.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness BWD - dX #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_dx, LIBXSMM_DATATYPE_F32, ld_in*n, 1, dX_ref, dX_bwd_nc, 0, 0);
printf("L1 reference : %.25g\n", norms_dx.l1_ref);
printf("L1 test : %.25g\n", norms_dx.l1_tst);
printf("L2 abs.error : %.24f\n", norms_dx.l2_abs);
printf("L2 rel.error : %.24f\n", norms_dx.l2_rel);
printf("Linf abs.error: %.24f\n", norms_dx.linf_abs);
printf("Linf rel.error: %.24f\n", norms_dx.linf_rel);
printf("Check-norm : %.24f\n\n", norms_dx.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness BWD - dbeta #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_dbeta, LIBXSMM_DATATYPE_F32, m, 1, dbeta_ref, dbeta_bwd, 0, 0);
printf("L1 reference : %.25g\n", norms_dbeta.l1_ref);
printf("L1 test : %.25g\n", norms_dbeta.l1_tst);
printf("L2 abs.error : %.24f\n", norms_dbeta.l2_abs);
printf("L2 rel.error : %.24f\n", norms_dbeta.l2_rel);
printf("Linf abs.error: %.24f\n", norms_dbeta.linf_abs);
printf("Linf rel.error: %.24f\n", norms_dbeta.linf_rel);
printf("Check-norm : %.24f\n\n", norms_dbeta.normf_rel);
/* compare */
printf("##########################################\n");
printf("# Correctness BWD - dgamma #\n");
printf("##########################################\n");
libxsmm_matdiff(&norms_dgamma, LIBXSMM_DATATYPE_F32, m, 1, dgamma_ref, dgamma_bwd, 0, 0);
printf("L1 reference : %.25g\n", norms_dgamma.l1_ref);
printf("L1 test : %.25g\n", norms_dgamma.l1_tst);
printf("L2 abs.error : %.24f\n", norms_dgamma.l2_abs);
printf("L2 rel.error : %.24f\n", norms_dgamma.l2_rel);
printf("Linf abs.error: %.24f\n", norms_dgamma.linf_abs);
printf("Linf rel.error: %.24f\n", norms_dgamma.linf_rel);
printf("Check-norm : %.24f\n\n", norms_dgamma.normf_rel);
l_start = libxsmm_timer_tick();
/* Calculate reference results... */
for (k = 0; k < iters; k++) {
naive_layernorm(m, n, ld_in, sinp, gamma, beta, sout_ref, mean_data_ref, rstd_data_ref);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Reference fwd time = %.5g\n", ((double)(l_total)));
l_start = libxsmm_timer_tick();
for (k = 0; k < iters; k++) {
#if 1
optimized_blocked_layernorm(m, n, block_size, block_size, sout, gamma, beta, mean_data, rstd_data);
#else
optimized_layernorm(m, n, ld_in, sinp, gamma, beta, sout, mean_data, rstd_data, reduce_kernel, scalemean_kernel, scaleout_kernel, bias_aux);
#endif
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("Optimized fwd time = %.5g\n", ((double)(l_total2)));
printf("Speedup fwd is = %.5g\n", ((double)(l_total/l_total2)));
l_start = libxsmm_timer_tick();
/* Calculate reference results... */
for (k = 0; k < iters; k++) {
naive_layernorm_bwd(m, n, ld_in, dY_ref, X_ref, mean_ref, rstd_ref, gamma_ref, dX_ref, dgamma_ref, dbeta_ref);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Reference bwd time = %.5g\n", ((double)(l_total)));
l_start = libxsmm_timer_tick();
for (k = 0; k < iters; k++) {
optimized_blocked_layernorm_bwd(m, n, block_size, block_size, dY_bwd, X_bwd, mean_ref, rstd_ref, gamma_ref, dX_bwd, dgamma_bwd, dbeta_bwd);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("Optimized bwd time = %.5g\n", ((double)(l_total2)));
printf("Speedup bwd is = %.5g\n", ((double)(l_total/l_total2)));
/* Free allocated arrays */
free(sinp);
free(gamma);
free(beta);
free(sout);
free(sout_nc);
free(mean_data);
free(rstd_data);
free(mean_data_ref);
free(rstd_data_ref);
free(sout_ref);
free(bias_aux);
free(dY_ref);
free(X_ref);
free(mean_ref);
free(rstd_ref);
free(gamma_ref);
free(dX_ref);
free(dgamma_ref);
free(dbeta_ref);
free(dY_bwd);
free(X_bwd);
free(dX_bwd);
free(dgamma_bwd);
free(dbeta_bwd);
free(dX_bwd_nc);
return EXIT_SUCCESS;
}
|
DRB057-jacobiinitialize-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Use of private() clause
*/
#include <stdio.h>
#include <math.h>
#define MSIZE 200
int n=MSIZE, m=MSIZE;
double alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;
void
initialize ()
{
int i, j, xx, yy;
dx = 2.0 / (n - 1);
dy = 2.0 / (m - 1);
/* Initialize initial condition and RHS */
#pragma omp parallel for private(i,j,xx,yy)
for (i = 0; i < n; i++)
#pragma omp parallel for private(j,xx,yy)
for (j = 0; j < m; j++)
{
xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */
yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */
u[i][j] = 0.0;
f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
- 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
}
}
int main()
{
initialize();
int i, j;
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
printf("%lf %lf\n", u[i][j], f[i][j]);
}
}
return 0;
}
|