Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- llava/lib/libtinfo.a +3 -0
- llava/lib/libz.a +3 -0
- llava/lib/tcl8.6/auto.tcl +648 -0
- llava/lib/tcl8.6/clock.tcl +0 -0
- llava/lib/tcl8.6/history.tcl +335 -0
- llava/lib/tcl8.6/init.tcl +827 -0
- llava/lib/tcl8.6/parray.tcl +28 -0
- llava/lib/tcl8.6/tclAppInit.c +176 -0
- llava/lib/tcl8.6/tclIndex +79 -0
- llava/lib/tcl8.6/tm.tcl +380 -0
- parrot/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc +3 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h +32 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h +39 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h +395 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h +124 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h +65 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h +242 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h +799 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h +17 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h +313 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h +30 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h +36 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h +83 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h +160 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h +199 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h +596 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h +14 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h +13 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h +4 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h +358 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h +549 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h +43 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h +47 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h +8 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h +1174 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h +432 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h +469 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h +443 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_half_neon.h +818 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h +1586 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_mask.h +93 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h +1341 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h +246 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h +584 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h +286 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h +281 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h +279 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h +1662 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h +513 -0
.gitattributes
CHANGED
|
@@ -383,3 +383,6 @@ llava_next/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSeri
|
|
| 383 |
llava_next/lib/python3.10/pydoc_data/__pycache__/topics.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 384 |
llava_next/lib/python3.10/html/__pycache__/entities.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 385 |
parrot/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 383 |
llava_next/lib/python3.10/pydoc_data/__pycache__/topics.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 384 |
llava_next/lib/python3.10/html/__pycache__/entities.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 385 |
parrot/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 386 |
+
parrot/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 387 |
+
llava/lib/libtinfo.a filter=lfs diff=lfs merge=lfs -text
|
| 388 |
+
llava/lib/libz.a filter=lfs diff=lfs merge=lfs -text
|
llava/lib/libtinfo.a
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:68510564cd1df8040ea826fc013ad16e413d278270d5d01281c334d75d7c427b
|
| 3 |
+
size 489850
|
llava/lib/libz.a
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4dd684d6a6a4060c8a4193d95db812ca07047890ae0fe8d62cc34bb60370ffc4
|
| 3 |
+
size 165662
|
llava/lib/tcl8.6/auto.tcl
ADDED
|
@@ -0,0 +1,648 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# auto.tcl --
|
| 2 |
+
#
|
| 3 |
+
# utility procs formerly in init.tcl dealing with auto execution of commands
|
| 4 |
+
# and can be auto loaded themselves.
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 1991-1993 The Regents of the University of California.
|
| 7 |
+
# Copyright (c) 1994-1998 Sun Microsystems, Inc.
|
| 8 |
+
#
|
| 9 |
+
# See the file "license.terms" for information on usage and redistribution of
|
| 10 |
+
# this file, and for a DISCLAIMER OF ALL WARRANTIES.
|
| 11 |
+
#
|
| 12 |
+
|
| 13 |
+
# auto_reset --
|
| 14 |
+
#
|
| 15 |
+
# Destroy all cached information for auto-loading and auto-execution, so that
|
| 16 |
+
# the information gets recomputed the next time it's needed. Also delete any
|
| 17 |
+
# commands that are listed in the auto-load index.
|
| 18 |
+
#
|
| 19 |
+
# Arguments:
|
| 20 |
+
# None.
|
| 21 |
+
|
| 22 |
+
proc auto_reset {} {
|
| 23 |
+
global auto_execs auto_index auto_path
|
| 24 |
+
if {[array exists auto_index]} {
|
| 25 |
+
foreach cmdName [array names auto_index] {
|
| 26 |
+
set fqcn [namespace which $cmdName]
|
| 27 |
+
if {$fqcn eq ""} {
|
| 28 |
+
continue
|
| 29 |
+
}
|
| 30 |
+
rename $fqcn {}
|
| 31 |
+
}
|
| 32 |
+
}
|
| 33 |
+
unset -nocomplain auto_execs auto_index ::tcl::auto_oldpath
|
| 34 |
+
if {[catch {llength $auto_path}]} {
|
| 35 |
+
set auto_path [list [info library]]
|
| 36 |
+
} elseif {[info library] ni $auto_path} {
|
| 37 |
+
lappend auto_path [info library]
|
| 38 |
+
}
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
# tcl_findLibrary --
|
| 42 |
+
#
|
| 43 |
+
# This is a utility for extensions that searches for a library directory
|
| 44 |
+
# using a canonical searching algorithm. A side effect is to source the
|
| 45 |
+
# initialization script and set a global library variable.
|
| 46 |
+
#
|
| 47 |
+
# Arguments:
|
| 48 |
+
# basename Prefix of the directory name, (e.g., "tk")
|
| 49 |
+
# version Version number of the package, (e.g., "8.0")
|
| 50 |
+
# patch Patchlevel of the package, (e.g., "8.0.3")
|
| 51 |
+
# initScript Initialization script to source (e.g., tk.tcl)
|
| 52 |
+
# enVarName environment variable to honor (e.g., TK_LIBRARY)
|
| 53 |
+
# varName Global variable to set when done (e.g., tk_library)
|
| 54 |
+
|
| 55 |
+
proc tcl_findLibrary {basename version patch initScript enVarName varName} {
|
| 56 |
+
upvar #0 $varName the_library
|
| 57 |
+
global auto_path env tcl_platform
|
| 58 |
+
|
| 59 |
+
set dirs {}
|
| 60 |
+
set errors {}
|
| 61 |
+
|
| 62 |
+
# The C application may have hardwired a path, which we honor
|
| 63 |
+
|
| 64 |
+
if {[info exists the_library] && $the_library ne ""} {
|
| 65 |
+
lappend dirs $the_library
|
| 66 |
+
} else {
|
| 67 |
+
# Do the canonical search
|
| 68 |
+
|
| 69 |
+
# 1. From an environment variable, if it exists. Placing this first
|
| 70 |
+
# gives the end-user ultimate control to work-around any bugs, or
|
| 71 |
+
# to customize.
|
| 72 |
+
|
| 73 |
+
if {[info exists env($enVarName)]} {
|
| 74 |
+
lappend dirs $env($enVarName)
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
# 2. In the package script directory registered within the
|
| 78 |
+
# configuration of the package itself.
|
| 79 |
+
|
| 80 |
+
catch {
|
| 81 |
+
lappend dirs [::${basename}::pkgconfig get scriptdir,runtime]
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
# 3. Relative to auto_path directories. This checks relative to the
|
| 85 |
+
# Tcl library as well as allowing loading of libraries added to the
|
| 86 |
+
# auto_path that is not relative to the core library or binary paths.
|
| 87 |
+
foreach d $auto_path {
|
| 88 |
+
lappend dirs [file join $d $basename$version]
|
| 89 |
+
if {$tcl_platform(platform) eq "unix"
|
| 90 |
+
&& $tcl_platform(os) eq "Darwin"} {
|
| 91 |
+
# 4. On MacOSX, check the Resources/Scripts subdir too
|
| 92 |
+
lappend dirs [file join $d $basename$version Resources Scripts]
|
| 93 |
+
}
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
# 3. Various locations relative to the executable
|
| 97 |
+
# ../lib/foo1.0 (From bin directory in install hierarchy)
|
| 98 |
+
# ../../lib/foo1.0 (From bin/arch directory in install hierarchy)
|
| 99 |
+
# ../library (From unix directory in build hierarchy)
|
| 100 |
+
#
|
| 101 |
+
# Remaining locations are out of date (when relevant, they ought to be
|
| 102 |
+
# covered by the $::auto_path seach above) and disabled.
|
| 103 |
+
#
|
| 104 |
+
# ../../library (From unix/arch directory in build hierarchy)
|
| 105 |
+
# ../../foo1.0.1/library
|
| 106 |
+
# (From unix directory in parallel build hierarchy)
|
| 107 |
+
# ../../../foo1.0.1/library
|
| 108 |
+
# (From unix/arch directory in parallel build hierarchy)
|
| 109 |
+
|
| 110 |
+
set parentDir [file dirname [file dirname [info nameofexecutable]]]
|
| 111 |
+
set grandParentDir [file dirname $parentDir]
|
| 112 |
+
lappend dirs [file join $parentDir lib $basename$version]
|
| 113 |
+
lappend dirs [file join $grandParentDir lib $basename$version]
|
| 114 |
+
lappend dirs [file join $parentDir library]
|
| 115 |
+
if {0} {
|
| 116 |
+
lappend dirs [file join $grandParentDir library]
|
| 117 |
+
lappend dirs [file join $grandParentDir $basename$patch library]
|
| 118 |
+
lappend dirs [file join [file dirname $grandParentDir] \
|
| 119 |
+
$basename$patch library]
|
| 120 |
+
}
|
| 121 |
+
}
|
| 122 |
+
# make $dirs unique, preserving order
|
| 123 |
+
array set seen {}
|
| 124 |
+
foreach i $dirs {
|
| 125 |
+
# Make sure $i is unique under normalization. Avoid repeated [source].
|
| 126 |
+
if {[interp issafe]} {
|
| 127 |
+
# Safe interps have no [file normalize].
|
| 128 |
+
set norm $i
|
| 129 |
+
} else {
|
| 130 |
+
set norm [file normalize $i]
|
| 131 |
+
}
|
| 132 |
+
if {[info exists seen($norm)]} {
|
| 133 |
+
continue
|
| 134 |
+
}
|
| 135 |
+
set seen($norm) {}
|
| 136 |
+
|
| 137 |
+
set the_library $i
|
| 138 |
+
set file [file join $i $initScript]
|
| 139 |
+
|
| 140 |
+
# source everything when in a safe interpreter because we have a
|
| 141 |
+
# source command, but no file exists command
|
| 142 |
+
|
| 143 |
+
if {[interp issafe] || [file exists $file]} {
|
| 144 |
+
if {![catch {uplevel #0 [list source $file]} msg opts]} {
|
| 145 |
+
return
|
| 146 |
+
}
|
| 147 |
+
append errors "$file: $msg\n"
|
| 148 |
+
append errors [dict get $opts -errorinfo]\n
|
| 149 |
+
}
|
| 150 |
+
}
|
| 151 |
+
unset -nocomplain the_library
|
| 152 |
+
set msg "Can't find a usable $initScript in the following directories: \n"
|
| 153 |
+
append msg " $dirs\n\n"
|
| 154 |
+
append msg "$errors\n\n"
|
| 155 |
+
append msg "This probably means that $basename wasn't installed properly.\n"
|
| 156 |
+
error $msg
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
# ----------------------------------------------------------------------
|
| 161 |
+
# auto_mkindex
|
| 162 |
+
# ----------------------------------------------------------------------
|
| 163 |
+
# The following procedures are used to generate the tclIndex file from Tcl
|
| 164 |
+
# source files. They use a special safe interpreter to parse Tcl source
|
| 165 |
+
# files, writing out index entries as "proc" commands are encountered. This
|
| 166 |
+
# implementation won't work in a safe interpreter, since a safe interpreter
|
| 167 |
+
# can't create the special parser and mess with its commands.
|
| 168 |
+
|
| 169 |
+
if {[interp issafe]} {
|
| 170 |
+
return ;# Stop sourcing the file here
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
# auto_mkindex --
|
| 174 |
+
# Regenerate a tclIndex file from Tcl source files. Takes as argument the
|
| 175 |
+
# name of the directory in which the tclIndex file is to be placed, followed
|
| 176 |
+
# by any number of glob patterns to use in that directory to locate all of the
|
| 177 |
+
# relevant files.
|
| 178 |
+
#
|
| 179 |
+
# Arguments:
|
| 180 |
+
# dir - Name of the directory in which to create an index.
|
| 181 |
+
|
| 182 |
+
# args - Any number of additional arguments giving the names of files
|
| 183 |
+
# within dir. If no additional are given auto_mkindex will look
|
| 184 |
+
# for *.tcl.
|
| 185 |
+
|
| 186 |
+
proc auto_mkindex {dir args} {
|
| 187 |
+
if {[interp issafe]} {
|
| 188 |
+
error "can't generate index within safe interpreter"
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
set oldDir [pwd]
|
| 192 |
+
cd $dir
|
| 193 |
+
|
| 194 |
+
append index "# Tcl autoload index file, version 2.0\n"
|
| 195 |
+
append index "# This file is generated by the \"auto_mkindex\" command\n"
|
| 196 |
+
append index "# and sourced to set up indexing information for one or\n"
|
| 197 |
+
append index "# more commands. Typically each line is a command that\n"
|
| 198 |
+
append index "# sets an element in the auto_index array, where the\n"
|
| 199 |
+
append index "# element name is the name of a command and the value is\n"
|
| 200 |
+
append index "# a script that loads the command.\n\n"
|
| 201 |
+
if {![llength $args]} {
|
| 202 |
+
set args *.tcl
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
auto_mkindex_parser::init
|
| 206 |
+
foreach file [lsort [glob -- {*}$args]] {
|
| 207 |
+
try {
|
| 208 |
+
append index [auto_mkindex_parser::mkindex $file]
|
| 209 |
+
} on error {msg opts} {
|
| 210 |
+
cd $oldDir
|
| 211 |
+
return -options $opts $msg
|
| 212 |
+
}
|
| 213 |
+
}
|
| 214 |
+
auto_mkindex_parser::cleanup
|
| 215 |
+
|
| 216 |
+
set fid [open "tclIndex" w]
|
| 217 |
+
puts -nonewline $fid $index
|
| 218 |
+
close $fid
|
| 219 |
+
cd $oldDir
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
# Original version of auto_mkindex that just searches the source code for
|
| 223 |
+
# "proc" at the beginning of the line.
|
| 224 |
+
|
| 225 |
+
proc auto_mkindex_old {dir args} {
|
| 226 |
+
set oldDir [pwd]
|
| 227 |
+
cd $dir
|
| 228 |
+
set dir [pwd]
|
| 229 |
+
append index "# Tcl autoload index file, version 2.0\n"
|
| 230 |
+
append index "# This file is generated by the \"auto_mkindex\" command\n"
|
| 231 |
+
append index "# and sourced to set up indexing information for one or\n"
|
| 232 |
+
append index "# more commands. Typically each line is a command that\n"
|
| 233 |
+
append index "# sets an element in the auto_index array, where the\n"
|
| 234 |
+
append index "# element name is the name of a command and the value is\n"
|
| 235 |
+
append index "# a script that loads the command.\n\n"
|
| 236 |
+
if {![llength $args]} {
|
| 237 |
+
set args *.tcl
|
| 238 |
+
}
|
| 239 |
+
foreach file [lsort [glob -- {*}$args]] {
|
| 240 |
+
set f ""
|
| 241 |
+
set error [catch {
|
| 242 |
+
set f [open $file]
|
| 243 |
+
fconfigure $f -eofchar "\x1A {}"
|
| 244 |
+
while {[gets $f line] >= 0} {
|
| 245 |
+
if {[regexp {^proc[ ]+([^ ]*)} $line match procName]} {
|
| 246 |
+
set procName [lindex [auto_qualify $procName "::"] 0]
|
| 247 |
+
append index "set [list auto_index($procName)]"
|
| 248 |
+
append index " \[list source \[file join \$dir [list $file]\]\]\n"
|
| 249 |
+
}
|
| 250 |
+
}
|
| 251 |
+
close $f
|
| 252 |
+
} msg opts]
|
| 253 |
+
if {$error} {
|
| 254 |
+
catch {close $f}
|
| 255 |
+
cd $oldDir
|
| 256 |
+
return -options $opts $msg
|
| 257 |
+
}
|
| 258 |
+
}
|
| 259 |
+
set f ""
|
| 260 |
+
set error [catch {
|
| 261 |
+
set f [open tclIndex w]
|
| 262 |
+
puts -nonewline $f $index
|
| 263 |
+
close $f
|
| 264 |
+
cd $oldDir
|
| 265 |
+
} msg opts]
|
| 266 |
+
if {$error} {
|
| 267 |
+
catch {close $f}
|
| 268 |
+
cd $oldDir
|
| 269 |
+
error $msg $info $code
|
| 270 |
+
return -options $opts $msg
|
| 271 |
+
}
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
# Create a safe interpreter that can be used to parse Tcl source files
|
| 275 |
+
# generate a tclIndex file for autoloading. This interp contains commands for
|
| 276 |
+
# things that need index entries. Each time a command is executed, it writes
|
| 277 |
+
# an entry out to the index file.
|
| 278 |
+
|
| 279 |
+
namespace eval auto_mkindex_parser {
|
| 280 |
+
variable parser "" ;# parser used to build index
|
| 281 |
+
variable index "" ;# maintains index as it is built
|
| 282 |
+
variable scriptFile "" ;# name of file being processed
|
| 283 |
+
variable contextStack "" ;# stack of namespace scopes
|
| 284 |
+
variable imports "" ;# keeps track of all imported cmds
|
| 285 |
+
variable initCommands ;# list of commands that create aliases
|
| 286 |
+
if {![info exists initCommands]} {
|
| 287 |
+
set initCommands [list]
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
proc init {} {
|
| 291 |
+
variable parser
|
| 292 |
+
variable initCommands
|
| 293 |
+
|
| 294 |
+
if {![interp issafe]} {
|
| 295 |
+
set parser [interp create -safe]
|
| 296 |
+
$parser hide info
|
| 297 |
+
$parser hide rename
|
| 298 |
+
$parser hide proc
|
| 299 |
+
$parser hide namespace
|
| 300 |
+
$parser hide eval
|
| 301 |
+
$parser hide puts
|
| 302 |
+
foreach ns [$parser invokehidden namespace children ::] {
|
| 303 |
+
# MUST NOT DELETE "::tcl" OR BAD THINGS HAPPEN!
|
| 304 |
+
if {$ns eq "::tcl"} continue
|
| 305 |
+
$parser invokehidden namespace delete $ns
|
| 306 |
+
}
|
| 307 |
+
foreach cmd [$parser invokehidden info commands ::*] {
|
| 308 |
+
$parser invokehidden rename $cmd {}
|
| 309 |
+
}
|
| 310 |
+
$parser invokehidden proc unknown {args} {}
|
| 311 |
+
|
| 312 |
+
# We'll need access to the "namespace" command within the
|
| 313 |
+
# interp. Put it back, but move it out of the way.
|
| 314 |
+
|
| 315 |
+
$parser expose namespace
|
| 316 |
+
$parser invokehidden rename namespace _%@namespace
|
| 317 |
+
$parser expose eval
|
| 318 |
+
$parser invokehidden rename eval _%@eval
|
| 319 |
+
|
| 320 |
+
# Install all the registered pseudo-command implementations
|
| 321 |
+
|
| 322 |
+
foreach cmd $initCommands {
|
| 323 |
+
eval $cmd
|
| 324 |
+
}
|
| 325 |
+
}
|
| 326 |
+
}
|
| 327 |
+
proc cleanup {} {
|
| 328 |
+
variable parser
|
| 329 |
+
interp delete $parser
|
| 330 |
+
unset parser
|
| 331 |
+
}
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
# auto_mkindex_parser::mkindex --
|
| 335 |
+
#
|
| 336 |
+
# Used by the "auto_mkindex" command to create a "tclIndex" file for the given
|
| 337 |
+
# Tcl source file. Executes the commands in the file, and handles things like
|
| 338 |
+
# the "proc" command by adding an entry for the index file. Returns a string
|
| 339 |
+
# that represents the index file.
|
| 340 |
+
#
|
| 341 |
+
# Arguments:
|
| 342 |
+
# file Name of Tcl source file to be indexed.
|
| 343 |
+
|
| 344 |
+
proc auto_mkindex_parser::mkindex {file} {
|
| 345 |
+
variable parser
|
| 346 |
+
variable index
|
| 347 |
+
variable scriptFile
|
| 348 |
+
variable contextStack
|
| 349 |
+
variable imports
|
| 350 |
+
|
| 351 |
+
set scriptFile $file
|
| 352 |
+
|
| 353 |
+
set fid [open $file]
|
| 354 |
+
fconfigure $fid -eofchar "\x1A {}"
|
| 355 |
+
set contents [read $fid]
|
| 356 |
+
close $fid
|
| 357 |
+
|
| 358 |
+
# There is one problem with sourcing files into the safe interpreter:
|
| 359 |
+
# references like "$x" will fail since code is not really being executed
|
| 360 |
+
# and variables do not really exist. To avoid this, we replace all $ with
|
| 361 |
+
# \0 (literally, the null char) later, when getting proc names we will
|
| 362 |
+
# have to reverse this replacement, in case there were any $ in the proc
|
| 363 |
+
# name. This will cause a problem if somebody actually tries to have a \0
|
| 364 |
+
# in their proc name. Too bad for them.
|
| 365 |
+
set contents [string map [list \$ \0] $contents]
|
| 366 |
+
|
| 367 |
+
set index ""
|
| 368 |
+
set contextStack ""
|
| 369 |
+
set imports ""
|
| 370 |
+
|
| 371 |
+
$parser eval $contents
|
| 372 |
+
|
| 373 |
+
foreach name $imports {
|
| 374 |
+
catch {$parser eval [list _%@namespace forget $name]}
|
| 375 |
+
}
|
| 376 |
+
return $index
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
# auto_mkindex_parser::hook command
|
| 380 |
+
#
|
| 381 |
+
# Registers a Tcl command to evaluate when initializing the child interpreter
|
| 382 |
+
# used by the mkindex parser. The command is evaluated in the parent
|
| 383 |
+
# interpreter, and can use the variable auto_mkindex_parser::parser to get to
|
| 384 |
+
# the child
|
| 385 |
+
|
| 386 |
+
proc auto_mkindex_parser::hook {cmd} {
|
| 387 |
+
variable initCommands
|
| 388 |
+
|
| 389 |
+
lappend initCommands $cmd
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
# auto_mkindex_parser::slavehook command
|
| 393 |
+
#
|
| 394 |
+
# Registers a Tcl command to evaluate when initializing the child interpreter
|
| 395 |
+
# used by the mkindex parser. The command is evaluated in the child
|
| 396 |
+
# interpreter.
|
| 397 |
+
|
| 398 |
+
proc auto_mkindex_parser::slavehook {cmd} {
|
| 399 |
+
variable initCommands
|
| 400 |
+
|
| 401 |
+
# The $parser variable is defined to be the name of the child interpreter
|
| 402 |
+
# when this command is used later.
|
| 403 |
+
|
| 404 |
+
lappend initCommands "\$parser eval [list $cmd]"
|
| 405 |
+
}
|
| 406 |
+
|
| 407 |
+
# auto_mkindex_parser::command --
|
| 408 |
+
#
|
| 409 |
+
# Registers a new command with the "auto_mkindex_parser" interpreter that
|
| 410 |
+
# parses Tcl files. These commands are fake versions of things like the
|
| 411 |
+
# "proc" command. When you execute them, they simply write out an entry to a
|
| 412 |
+
# "tclIndex" file for auto-loading.
|
| 413 |
+
#
|
| 414 |
+
# This procedure allows extensions to register their own commands with the
|
| 415 |
+
# auto_mkindex facility. For example, a package like [incr Tcl] might
|
| 416 |
+
# register a "class" command so that class definitions could be added to a
|
| 417 |
+
# "tclIndex" file for auto-loading.
|
| 418 |
+
#
|
| 419 |
+
# Arguments:
|
| 420 |
+
# name Name of command recognized in Tcl files.
|
| 421 |
+
# arglist Argument list for command.
|
| 422 |
+
# body Implementation of command to handle indexing.
|
| 423 |
+
|
| 424 |
+
proc auto_mkindex_parser::command {name arglist body} {
|
| 425 |
+
hook [list auto_mkindex_parser::commandInit $name $arglist $body]
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
# auto_mkindex_parser::commandInit --
|
| 429 |
+
#
|
| 430 |
+
# This does the actual work set up by auto_mkindex_parser::command. This is
|
| 431 |
+
# called when the interpreter used by the parser is created.
|
| 432 |
+
#
|
| 433 |
+
# Arguments:
|
| 434 |
+
# name Name of command recognized in Tcl files.
|
| 435 |
+
# arglist Argument list for command.
|
| 436 |
+
# body Implementation of command to handle indexing.
|
| 437 |
+
|
| 438 |
+
proc auto_mkindex_parser::commandInit {name arglist body} {
|
| 439 |
+
variable parser
|
| 440 |
+
|
| 441 |
+
set ns [namespace qualifiers $name]
|
| 442 |
+
set tail [namespace tail $name]
|
| 443 |
+
if {$ns eq ""} {
|
| 444 |
+
set fakeName [namespace current]::_%@fake_$tail
|
| 445 |
+
} else {
|
| 446 |
+
set fakeName [namespace current]::[string map {:: _} _%@fake_$name]
|
| 447 |
+
}
|
| 448 |
+
proc $fakeName $arglist $body
|
| 449 |
+
|
| 450 |
+
# YUK! Tcl won't let us alias fully qualified command names, so we can't
|
| 451 |
+
# handle names like "::itcl::class". Instead, we have to build procs with
|
| 452 |
+
# the fully qualified names, and have the procs point to the aliases.
|
| 453 |
+
|
| 454 |
+
if {[string match *::* $name]} {
|
| 455 |
+
set exportCmd [list _%@namespace export [namespace tail $name]]
|
| 456 |
+
$parser eval [list _%@namespace eval $ns $exportCmd]
|
| 457 |
+
|
| 458 |
+
# The following proc definition does not work if you want to tolerate
|
| 459 |
+
# space or something else diabolical in the procedure name, (i.e.,
|
| 460 |
+
# space in $alias). The following does not work:
|
| 461 |
+
# "_%@eval {$alias} \$args"
|
| 462 |
+
# because $alias gets concat'ed to $args. The following does not work
|
| 463 |
+
# because $cmd is somehow undefined
|
| 464 |
+
# "set cmd {$alias} \; _%@eval {\$cmd} \$args"
|
| 465 |
+
# A gold star to someone that can make test autoMkindex-3.3 work
|
| 466 |
+
# properly
|
| 467 |
+
|
| 468 |
+
set alias [namespace tail $fakeName]
|
| 469 |
+
$parser invokehidden proc $name {args} "_%@eval {$alias} \$args"
|
| 470 |
+
$parser alias $alias $fakeName
|
| 471 |
+
} else {
|
| 472 |
+
$parser alias $name $fakeName
|
| 473 |
+
}
|
| 474 |
+
return
|
| 475 |
+
}
|
| 476 |
+
|
| 477 |
+
# auto_mkindex_parser::fullname --
|
| 478 |
+
#
|
| 479 |
+
# Used by commands like "proc" within the auto_mkindex parser. Returns the
|
| 480 |
+
# qualified namespace name for the "name" argument. If the "name" does not
|
| 481 |
+
# start with "::", elements are added from the current namespace stack to
|
| 482 |
+
# produce a qualified name. Then, the name is examined to see whether or not
|
| 483 |
+
# it should really be qualified. If the name has more than the leading "::",
|
| 484 |
+
# it is returned as a fully qualified name. Otherwise, it is returned as a
|
| 485 |
+
# simple name. That way, the Tcl autoloader will recognize it properly.
|
| 486 |
+
#
|
| 487 |
+
# Arguments:
|
| 488 |
+
# name - Name that is being added to index.
|
| 489 |
+
|
| 490 |
+
proc auto_mkindex_parser::fullname {name} {
|
| 491 |
+
variable contextStack
|
| 492 |
+
|
| 493 |
+
if {![string match ::* $name]} {
|
| 494 |
+
foreach ns $contextStack {
|
| 495 |
+
set name "${ns}::$name"
|
| 496 |
+
if {[string match ::* $name]} {
|
| 497 |
+
break
|
| 498 |
+
}
|
| 499 |
+
}
|
| 500 |
+
}
|
| 501 |
+
|
| 502 |
+
if {[namespace qualifiers $name] eq ""} {
|
| 503 |
+
set name [namespace tail $name]
|
| 504 |
+
} elseif {![string match ::* $name]} {
|
| 505 |
+
set name "::$name"
|
| 506 |
+
}
|
| 507 |
+
|
| 508 |
+
# Earlier, mkindex replaced all $'s with \0. Now, we have to reverse that
|
| 509 |
+
# replacement.
|
| 510 |
+
return [string map [list \0 \$] $name]
|
| 511 |
+
}
|
| 512 |
+
|
| 513 |
+
# auto_mkindex_parser::indexEntry --
|
| 514 |
+
#
|
| 515 |
+
# Used by commands like "proc" within the auto_mkindex parser to add a
|
| 516 |
+
# correctly-quoted entry to the index. This is shared code so it is done
|
| 517 |
+
# *right*, in one place.
|
| 518 |
+
#
|
| 519 |
+
# Arguments:
|
| 520 |
+
# name - Name that is being added to index.
|
| 521 |
+
|
| 522 |
+
proc auto_mkindex_parser::indexEntry {name} {
|
| 523 |
+
variable index
|
| 524 |
+
variable scriptFile
|
| 525 |
+
|
| 526 |
+
# We convert all metacharacters to their backslashed form, and pre-split
|
| 527 |
+
# the file name that we know about (which will be a proper list, and so
|
| 528 |
+
# correctly quoted).
|
| 529 |
+
|
| 530 |
+
set name [string range [list \}[fullname $name]] 2 end]
|
| 531 |
+
set filenameParts [file split $scriptFile]
|
| 532 |
+
|
| 533 |
+
append index [format \
|
| 534 |
+
{set auto_index(%s) [list source [file join $dir %s]]%s} \
|
| 535 |
+
$name $filenameParts \n]
|
| 536 |
+
return
|
| 537 |
+
}
|
| 538 |
+
|
| 539 |
+
if {[llength $::auto_mkindex_parser::initCommands]} {
|
| 540 |
+
return
|
| 541 |
+
}
|
| 542 |
+
|
| 543 |
+
# Register all of the procedures for the auto_mkindex parser that will build
|
| 544 |
+
# the "tclIndex" file.
|
| 545 |
+
|
| 546 |
+
# AUTO MKINDEX: proc name arglist body
|
| 547 |
+
# Adds an entry to the auto index list for the given procedure name.
|
| 548 |
+
|
| 549 |
+
auto_mkindex_parser::command proc {name args} {
|
| 550 |
+
indexEntry $name
|
| 551 |
+
}
|
| 552 |
+
|
| 553 |
+
# Conditionally add support for Tcl byte code files. There are some tricky
|
| 554 |
+
# details here. First, we need to get the tbcload library initialized in the
|
| 555 |
+
# current interpreter. We cannot load tbcload into the child until we have
|
| 556 |
+
# done so because it needs access to the tcl_patchLevel variable. Second,
|
| 557 |
+
# because the package index file may defer loading the library until we invoke
|
| 558 |
+
# a command, we need to explicitly invoke auto_load to force it to be loaded.
|
| 559 |
+
# This should be a noop if the package has already been loaded
|
| 560 |
+
|
| 561 |
+
auto_mkindex_parser::hook {
|
| 562 |
+
try {
|
| 563 |
+
package require tbcload
|
| 564 |
+
} on error {} {
|
| 565 |
+
# OK, don't have it so do nothing
|
| 566 |
+
} on ok {} {
|
| 567 |
+
if {[namespace which -command tbcload::bcproc] eq ""} {
|
| 568 |
+
auto_load tbcload::bcproc
|
| 569 |
+
}
|
| 570 |
+
load {} tbcload $auto_mkindex_parser::parser
|
| 571 |
+
|
| 572 |
+
# AUTO MKINDEX: tbcload::bcproc name arglist body
|
| 573 |
+
# Adds an entry to the auto index list for the given precompiled
|
| 574 |
+
# procedure name.
|
| 575 |
+
|
| 576 |
+
auto_mkindex_parser::commandInit tbcload::bcproc {name args} {
|
| 577 |
+
indexEntry $name
|
| 578 |
+
}
|
| 579 |
+
}
|
| 580 |
+
}
|
| 581 |
+
|
| 582 |
+
# AUTO MKINDEX: namespace eval name command ?arg arg...?
|
| 583 |
+
# Adds the namespace name onto the context stack and evaluates the associated
|
| 584 |
+
# body of commands.
|
| 585 |
+
#
|
| 586 |
+
# AUTO MKINDEX: namespace import ?-force? pattern ?pattern...?
|
| 587 |
+
# Performs the "import" action in the parser interpreter. This is important
|
| 588 |
+
# for any commands contained in a namespace that affect the index. For
|
| 589 |
+
# example, a script may say "itcl::class ...", or it may import "itcl::*" and
|
| 590 |
+
# then say "class ...". This procedure does the import operation, but keeps
|
| 591 |
+
# track of imported patterns so we can remove the imports later.
|
| 592 |
+
|
| 593 |
+
auto_mkindex_parser::command namespace {op args} {
|
| 594 |
+
switch -- $op {
|
| 595 |
+
eval {
|
| 596 |
+
variable parser
|
| 597 |
+
variable contextStack
|
| 598 |
+
|
| 599 |
+
set name [lindex $args 0]
|
| 600 |
+
set args [lrange $args 1 end]
|
| 601 |
+
|
| 602 |
+
set contextStack [linsert $contextStack 0 $name]
|
| 603 |
+
$parser eval [list _%@namespace eval $name] $args
|
| 604 |
+
set contextStack [lrange $contextStack 1 end]
|
| 605 |
+
}
|
| 606 |
+
import {
|
| 607 |
+
variable parser
|
| 608 |
+
variable imports
|
| 609 |
+
foreach pattern $args {
|
| 610 |
+
if {$pattern ne "-force"} {
|
| 611 |
+
lappend imports $pattern
|
| 612 |
+
}
|
| 613 |
+
}
|
| 614 |
+
catch {$parser eval "_%@namespace import $args"}
|
| 615 |
+
}
|
| 616 |
+
ensemble {
|
| 617 |
+
variable parser
|
| 618 |
+
variable contextStack
|
| 619 |
+
if {[lindex $args 0] eq "create"} {
|
| 620 |
+
set name ::[join [lreverse $contextStack] ::]
|
| 621 |
+
catch {
|
| 622 |
+
set name [dict get [lrange $args 1 end] -command]
|
| 623 |
+
if {![string match ::* $name]} {
|
| 624 |
+
set name ::[join [lreverse $contextStack] ::]$name
|
| 625 |
+
}
|
| 626 |
+
regsub -all ::+ $name :: name
|
| 627 |
+
}
|
| 628 |
+
# create artificial proc to force an entry in the tclIndex
|
| 629 |
+
$parser eval [list ::proc $name {} {}]
|
| 630 |
+
}
|
| 631 |
+
}
|
| 632 |
+
}
|
| 633 |
+
}
|
| 634 |
+
|
| 635 |
+
# AUTO MKINDEX: oo::class create name ?definition?
|
| 636 |
+
# Adds an entry to the auto index list for the given class name.
|
| 637 |
+
auto_mkindex_parser::command oo::class {op name {body ""}} {
|
| 638 |
+
if {$op eq "create"} {
|
| 639 |
+
indexEntry $name
|
| 640 |
+
}
|
| 641 |
+
}
|
| 642 |
+
auto_mkindex_parser::command class {op name {body ""}} {
|
| 643 |
+
if {$op eq "create"} {
|
| 644 |
+
indexEntry $name
|
| 645 |
+
}
|
| 646 |
+
}
|
| 647 |
+
|
| 648 |
+
return
|
llava/lib/tcl8.6/clock.tcl
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava/lib/tcl8.6/history.tcl
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# history.tcl --
|
| 2 |
+
#
|
| 3 |
+
# Implementation of the history command.
|
| 4 |
+
#
|
| 5 |
+
# Copyright (c) 1997 Sun Microsystems, Inc.
|
| 6 |
+
#
|
| 7 |
+
# See the file "license.terms" for information on usage and redistribution of
|
| 8 |
+
# this file, and for a DISCLAIMER OF ALL WARRANTIES.
|
| 9 |
+
#
|
| 10 |
+
|
| 11 |
+
# The tcl::history array holds the history list and some additional
|
| 12 |
+
# bookkeeping variables.
|
| 13 |
+
#
|
| 14 |
+
# nextid the index used for the next history list item.
|
| 15 |
+
# keep the max size of the history list
|
| 16 |
+
# oldest the index of the oldest item in the history.
|
| 17 |
+
|
| 18 |
+
namespace eval ::tcl {
|
| 19 |
+
variable history
|
| 20 |
+
if {![info exists history]} {
|
| 21 |
+
array set history {
|
| 22 |
+
nextid 0
|
| 23 |
+
keep 20
|
| 24 |
+
oldest -20
|
| 25 |
+
}
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
namespace ensemble create -command ::tcl::history -map {
|
| 29 |
+
add ::tcl::HistAdd
|
| 30 |
+
change ::tcl::HistChange
|
| 31 |
+
clear ::tcl::HistClear
|
| 32 |
+
event ::tcl::HistEvent
|
| 33 |
+
info ::tcl::HistInfo
|
| 34 |
+
keep ::tcl::HistKeep
|
| 35 |
+
nextid ::tcl::HistNextID
|
| 36 |
+
redo ::tcl::HistRedo
|
| 37 |
+
}
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
# history --
|
| 41 |
+
#
|
| 42 |
+
# This is the main history command. See the man page for its interface.
|
| 43 |
+
# This does some argument checking and calls the helper ensemble in the
|
| 44 |
+
# tcl namespace.
|
| 45 |
+
|
| 46 |
+
proc ::history {args} {
|
| 47 |
+
# If no command given, we're doing 'history info'. Can't be done with an
|
| 48 |
+
# ensemble unknown handler, as those don't fire when no subcommand is
|
| 49 |
+
# given at all.
|
| 50 |
+
|
| 51 |
+
if {![llength $args]} {
|
| 52 |
+
set args info
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
# Tricky stuff needed to make stack and errors come out right!
|
| 56 |
+
tailcall apply {arglist {tailcall ::tcl::history {*}$arglist} ::tcl} $args
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
# (unnamed) --
|
| 60 |
+
#
|
| 61 |
+
# Callback when [::history] is destroyed. Destroys the implementation.
|
| 62 |
+
#
|
| 63 |
+
# Parameters:
|
| 64 |
+
# oldName what the command was called.
|
| 65 |
+
# newName what the command is now called (an empty string).
|
| 66 |
+
# op the operation (= delete).
|
| 67 |
+
#
|
| 68 |
+
# Results:
|
| 69 |
+
# none
|
| 70 |
+
#
|
| 71 |
+
# Side Effects:
|
| 72 |
+
# The implementation of the [::history] command ceases to exist.
|
| 73 |
+
|
| 74 |
+
trace add command ::history delete [list apply {{oldName newName op} {
|
| 75 |
+
variable history
|
| 76 |
+
unset -nocomplain history
|
| 77 |
+
foreach c [info procs ::tcl::Hist*] {
|
| 78 |
+
rename $c {}
|
| 79 |
+
}
|
| 80 |
+
rename ::tcl::history {}
|
| 81 |
+
} ::tcl}]
|
| 82 |
+
|
| 83 |
+
# tcl::HistAdd --
|
| 84 |
+
#
|
| 85 |
+
# Add an item to the history, and optionally eval it at the global scope
|
| 86 |
+
#
|
| 87 |
+
# Parameters:
|
| 88 |
+
# event the command to add
|
| 89 |
+
# exec (optional) a substring of "exec" causes the command to
|
| 90 |
+
# be evaled.
|
| 91 |
+
# Results:
|
| 92 |
+
# If executing, then the results of the command are returned
|
| 93 |
+
#
|
| 94 |
+
# Side Effects:
|
| 95 |
+
# Adds to the history list
|
| 96 |
+
|
| 97 |
+
proc ::tcl::HistAdd {event {exec {}}} {
|
| 98 |
+
variable history
|
| 99 |
+
|
| 100 |
+
if {
|
| 101 |
+
[prefix longest {exec {}} $exec] eq ""
|
| 102 |
+
&& [llength [info level 0]] == 3
|
| 103 |
+
} then {
|
| 104 |
+
return -code error "bad argument \"$exec\": should be \"exec\""
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
# Do not add empty commands to the history
|
| 108 |
+
if {[string trim $event] eq ""} {
|
| 109 |
+
return ""
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
# Maintain the history
|
| 113 |
+
set history([incr history(nextid)]) $event
|
| 114 |
+
unset -nocomplain history([incr history(oldest)])
|
| 115 |
+
|
| 116 |
+
# Only execute if 'exec' (or non-empty prefix of it) given
|
| 117 |
+
if {$exec eq ""} {
|
| 118 |
+
return ""
|
| 119 |
+
}
|
| 120 |
+
tailcall eval $event
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
# tcl::HistKeep --
|
| 124 |
+
#
|
| 125 |
+
# Set or query the limit on the length of the history list
|
| 126 |
+
#
|
| 127 |
+
# Parameters:
|
| 128 |
+
# limit (optional) the length of the history list
|
| 129 |
+
#
|
| 130 |
+
# Results:
|
| 131 |
+
# If no limit is specified, the current limit is returned
|
| 132 |
+
#
|
| 133 |
+
# Side Effects:
|
| 134 |
+
# Updates history(keep) if a limit is specified
|
| 135 |
+
|
| 136 |
+
proc ::tcl::HistKeep {{count {}}} {
|
| 137 |
+
variable history
|
| 138 |
+
if {[llength [info level 0]] == 1} {
|
| 139 |
+
return $history(keep)
|
| 140 |
+
}
|
| 141 |
+
if {![string is integer -strict $count] || ($count < 0)} {
|
| 142 |
+
return -code error "illegal keep count \"$count\""
|
| 143 |
+
}
|
| 144 |
+
set oldold $history(oldest)
|
| 145 |
+
set history(oldest) [expr {$history(nextid) - $count}]
|
| 146 |
+
for {} {$oldold <= $history(oldest)} {incr oldold} {
|
| 147 |
+
unset -nocomplain history($oldold)
|
| 148 |
+
}
|
| 149 |
+
set history(keep) $count
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
# tcl::HistClear --
|
| 153 |
+
#
|
| 154 |
+
# Erase the history list
|
| 155 |
+
#
|
| 156 |
+
# Parameters:
|
| 157 |
+
# none
|
| 158 |
+
#
|
| 159 |
+
# Results:
|
| 160 |
+
# none
|
| 161 |
+
#
|
| 162 |
+
# Side Effects:
|
| 163 |
+
# Resets the history array, except for the keep limit
|
| 164 |
+
|
| 165 |
+
proc ::tcl::HistClear {} {
|
| 166 |
+
variable history
|
| 167 |
+
set keep $history(keep)
|
| 168 |
+
unset history
|
| 169 |
+
array set history [list \
|
| 170 |
+
nextid 0 \
|
| 171 |
+
keep $keep \
|
| 172 |
+
oldest -$keep \
|
| 173 |
+
]
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
# tcl::HistInfo --
|
| 177 |
+
#
|
| 178 |
+
# Return a pretty-printed version of the history list
|
| 179 |
+
#
|
| 180 |
+
# Parameters:
|
| 181 |
+
# num (optional) the length of the history list to return
|
| 182 |
+
#
|
| 183 |
+
# Results:
|
| 184 |
+
# A formatted history list
|
| 185 |
+
|
| 186 |
+
proc ::tcl::HistInfo {{count {}}} {
|
| 187 |
+
variable history
|
| 188 |
+
if {[llength [info level 0]] == 1} {
|
| 189 |
+
set count [expr {$history(keep) + 1}]
|
| 190 |
+
} elseif {![string is integer -strict $count]} {
|
| 191 |
+
return -code error "bad integer \"$count\""
|
| 192 |
+
}
|
| 193 |
+
set result {}
|
| 194 |
+
set newline ""
|
| 195 |
+
for {set i [expr {$history(nextid) - $count + 1}]} \
|
| 196 |
+
{$i <= $history(nextid)} {incr i} {
|
| 197 |
+
if {![info exists history($i)]} {
|
| 198 |
+
continue
|
| 199 |
+
}
|
| 200 |
+
set cmd [string map [list \n \n\t] [string trimright $history($i) \ \n]]
|
| 201 |
+
append result $newline[format "%6d %s" $i $cmd]
|
| 202 |
+
set newline \n
|
| 203 |
+
}
|
| 204 |
+
return $result
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
# tcl::HistRedo --
|
| 208 |
+
#
|
| 209 |
+
# Fetch the previous or specified event, execute it, and then replace
|
| 210 |
+
# the current history item with that event.
|
| 211 |
+
#
|
| 212 |
+
# Parameters:
|
| 213 |
+
# event (optional) index of history item to redo. Defaults to -1,
|
| 214 |
+
# which means the previous event.
|
| 215 |
+
#
|
| 216 |
+
# Results:
|
| 217 |
+
# Those of the command being redone.
|
| 218 |
+
#
|
| 219 |
+
# Side Effects:
|
| 220 |
+
# Replaces the current history list item with the one being redone.
|
| 221 |
+
|
| 222 |
+
proc ::tcl::HistRedo {{event -1}} {
|
| 223 |
+
variable history
|
| 224 |
+
|
| 225 |
+
set i [HistIndex $event]
|
| 226 |
+
if {$i == $history(nextid)} {
|
| 227 |
+
return -code error "cannot redo the current event"
|
| 228 |
+
}
|
| 229 |
+
set cmd $history($i)
|
| 230 |
+
HistChange $cmd 0
|
| 231 |
+
tailcall eval $cmd
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
# tcl::HistIndex --
|
| 235 |
+
#
|
| 236 |
+
# Map from an event specifier to an index in the history list.
|
| 237 |
+
#
|
| 238 |
+
# Parameters:
|
| 239 |
+
# event index of history item to redo.
|
| 240 |
+
# If this is a positive number, it is used directly.
|
| 241 |
+
# If it is a negative number, then it counts back to a previous
|
| 242 |
+
# event, where -1 is the most recent event.
|
| 243 |
+
# A string can be matched, either by being the prefix of a
|
| 244 |
+
# command or by matching a command with string match.
|
| 245 |
+
#
|
| 246 |
+
# Results:
|
| 247 |
+
# The index into history, or an error if the index didn't match.
|
| 248 |
+
|
| 249 |
+
proc ::tcl::HistIndex {event} {
|
| 250 |
+
variable history
|
| 251 |
+
if {![string is integer -strict $event]} {
|
| 252 |
+
for {set i [expr {$history(nextid)-1}]} {[info exists history($i)]} \
|
| 253 |
+
{incr i -1} {
|
| 254 |
+
if {[string match $event* $history($i)]} {
|
| 255 |
+
return $i
|
| 256 |
+
}
|
| 257 |
+
if {[string match $event $history($i)]} {
|
| 258 |
+
return $i
|
| 259 |
+
}
|
| 260 |
+
}
|
| 261 |
+
return -code error "no event matches \"$event\""
|
| 262 |
+
} elseif {$event <= 0} {
|
| 263 |
+
set i [expr {$history(nextid) + $event}]
|
| 264 |
+
} else {
|
| 265 |
+
set i $event
|
| 266 |
+
}
|
| 267 |
+
if {$i <= $history(oldest)} {
|
| 268 |
+
return -code error "event \"$event\" is too far in the past"
|
| 269 |
+
}
|
| 270 |
+
if {$i > $history(nextid)} {
|
| 271 |
+
return -code error "event \"$event\" hasn't occurred yet"
|
| 272 |
+
}
|
| 273 |
+
return $i
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
# tcl::HistEvent --
|
| 277 |
+
#
|
| 278 |
+
# Map from an event specifier to the value in the history list.
|
| 279 |
+
#
|
| 280 |
+
# Parameters:
|
| 281 |
+
# event index of history item to redo. See index for a description of
|
| 282 |
+
# possible event patterns.
|
| 283 |
+
#
|
| 284 |
+
# Results:
|
| 285 |
+
# The value from the history list.
|
| 286 |
+
|
| 287 |
+
proc ::tcl::HistEvent {{event -1}} {
|
| 288 |
+
variable history
|
| 289 |
+
set i [HistIndex $event]
|
| 290 |
+
if {![info exists history($i)]} {
|
| 291 |
+
return ""
|
| 292 |
+
}
|
| 293 |
+
return [string trimright $history($i) \ \n]
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
# tcl::HistChange --
|
| 297 |
+
#
|
| 298 |
+
# Replace a value in the history list.
|
| 299 |
+
#
|
| 300 |
+
# Parameters:
|
| 301 |
+
# newValue The new value to put into the history list.
|
| 302 |
+
# event (optional) index of history item to redo. See index for a
|
| 303 |
+
# description of possible event patterns. This defaults to 0,
|
| 304 |
+
# which specifies the current event.
|
| 305 |
+
#
|
| 306 |
+
# Side Effects:
|
| 307 |
+
# Changes the history list.
|
| 308 |
+
|
| 309 |
+
proc ::tcl::HistChange {newValue {event 0}} {
|
| 310 |
+
variable history
|
| 311 |
+
set i [HistIndex $event]
|
| 312 |
+
set history($i) $newValue
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
# tcl::HistNextID --
|
| 316 |
+
#
|
| 317 |
+
# Returns the number of the next history event.
|
| 318 |
+
#
|
| 319 |
+
# Parameters:
|
| 320 |
+
# None.
|
| 321 |
+
#
|
| 322 |
+
# Side Effects:
|
| 323 |
+
# None.
|
| 324 |
+
|
| 325 |
+
proc ::tcl::HistNextID {} {
|
| 326 |
+
variable history
|
| 327 |
+
return [expr {$history(nextid) + 1}]
|
| 328 |
+
}
|
| 329 |
+
|
| 330 |
+
return
|
| 331 |
+
|
| 332 |
+
# Local Variables:
|
| 333 |
+
# mode: tcl
|
| 334 |
+
# fill-column: 78
|
| 335 |
+
# End:
|
llava/lib/tcl8.6/init.tcl
ADDED
|
@@ -0,0 +1,827 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# init.tcl --
|
| 2 |
+
#
|
| 3 |
+
# Default system startup file for Tcl-based applications. Defines
|
| 4 |
+
# "unknown" procedure and auto-load facilities.
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 1991-1993 The Regents of the University of California.
|
| 7 |
+
# Copyright (c) 1994-1996 Sun Microsystems, Inc.
|
| 8 |
+
# Copyright (c) 1998-1999 Scriptics Corporation.
|
| 9 |
+
# Copyright (c) 2004 Kevin B. Kenny. All rights reserved.
|
| 10 |
+
#
|
| 11 |
+
# See the file "license.terms" for information on usage and redistribution
|
| 12 |
+
# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
|
| 13 |
+
#
|
| 14 |
+
|
| 15 |
+
# This test intentionally written in pre-7.5 Tcl
|
| 16 |
+
if {[info commands package] == ""} {
|
| 17 |
+
error "version mismatch: library\nscripts expect Tcl version 7.5b1 or later but the loaded version is\nonly [info patchlevel]"
|
| 18 |
+
}
|
| 19 |
+
package require -exact Tcl 8.6.14
|
| 20 |
+
|
| 21 |
+
# Compute the auto path to use in this interpreter.
|
| 22 |
+
# The values on the path come from several locations:
|
| 23 |
+
#
|
| 24 |
+
# The environment variable TCLLIBPATH
|
| 25 |
+
#
|
| 26 |
+
# tcl_library, which is the directory containing this init.tcl script.
|
| 27 |
+
# [tclInit] (Tcl_Init()) searches around for the directory containing this
|
| 28 |
+
# init.tcl and defines tcl_library to that location before sourcing it.
|
| 29 |
+
#
|
| 30 |
+
# The parent directory of tcl_library. Adding the parent
|
| 31 |
+
# means that packages in peer directories will be found automatically.
|
| 32 |
+
#
|
| 33 |
+
# Also add the directory ../lib relative to the directory where the
|
| 34 |
+
# executable is located. This is meant to find binary packages for the
|
| 35 |
+
# same architecture as the current executable.
|
| 36 |
+
#
|
| 37 |
+
# tcl_pkgPath, which is set by the platform-specific initialization routines
|
| 38 |
+
# On UNIX it is compiled in
|
| 39 |
+
# On Windows, it is not used
|
| 40 |
+
#
|
| 41 |
+
# (Ticket 41c9857bdd) In a safe interpreter, this file does not set
|
| 42 |
+
# ::auto_path (other than to {} if it is undefined). The caller, typically
|
| 43 |
+
# a Safe Base command, is responsible for setting ::auto_path.
|
| 44 |
+
|
| 45 |
+
if {![info exists auto_path]} {
|
| 46 |
+
if {[info exists env(TCLLIBPATH)] && (![interp issafe])} {
|
| 47 |
+
set auto_path $env(TCLLIBPATH)
|
| 48 |
+
} else {
|
| 49 |
+
set auto_path ""
|
| 50 |
+
}
|
| 51 |
+
}
|
| 52 |
+
namespace eval tcl {
|
| 53 |
+
if {![interp issafe]} {
|
| 54 |
+
variable Dir
|
| 55 |
+
foreach Dir [list $::tcl_library [file dirname $::tcl_library]] {
|
| 56 |
+
if {$Dir ni $::auto_path} {
|
| 57 |
+
lappend ::auto_path $Dir
|
| 58 |
+
}
|
| 59 |
+
}
|
| 60 |
+
set Dir [file join [file dirname [file dirname \
|
| 61 |
+
[info nameofexecutable]]] lib]
|
| 62 |
+
if {$Dir ni $::auto_path} {
|
| 63 |
+
lappend ::auto_path $Dir
|
| 64 |
+
}
|
| 65 |
+
if {[info exists ::tcl_pkgPath]} { catch {
|
| 66 |
+
foreach Dir $::tcl_pkgPath {
|
| 67 |
+
if {$Dir ni $::auto_path} {
|
| 68 |
+
lappend ::auto_path $Dir
|
| 69 |
+
}
|
| 70 |
+
}
|
| 71 |
+
}}
|
| 72 |
+
|
| 73 |
+
variable Path [encoding dirs]
|
| 74 |
+
set Dir [file join $::tcl_library encoding]
|
| 75 |
+
if {$Dir ni $Path} {
|
| 76 |
+
lappend Path $Dir
|
| 77 |
+
encoding dirs $Path
|
| 78 |
+
}
|
| 79 |
+
unset Dir Path
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
# TIP #255 min and max functions
|
| 83 |
+
namespace eval mathfunc {
|
| 84 |
+
proc min {args} {
|
| 85 |
+
if {![llength $args]} {
|
| 86 |
+
return -code error \
|
| 87 |
+
"not enough arguments to math function \"min\""
|
| 88 |
+
}
|
| 89 |
+
set val Inf
|
| 90 |
+
foreach arg $args {
|
| 91 |
+
# This will handle forcing the numeric value without
|
| 92 |
+
# ruining the internal type of a numeric object
|
| 93 |
+
if {[catch {expr {double($arg)}} err]} {
|
| 94 |
+
return -code error $err
|
| 95 |
+
}
|
| 96 |
+
if {$arg < $val} {set val $arg}
|
| 97 |
+
}
|
| 98 |
+
return $val
|
| 99 |
+
}
|
| 100 |
+
proc max {args} {
|
| 101 |
+
if {![llength $args]} {
|
| 102 |
+
return -code error \
|
| 103 |
+
"not enough arguments to math function \"max\""
|
| 104 |
+
}
|
| 105 |
+
set val -Inf
|
| 106 |
+
foreach arg $args {
|
| 107 |
+
# This will handle forcing the numeric value without
|
| 108 |
+
# ruining the internal type of a numeric object
|
| 109 |
+
if {[catch {expr {double($arg)}} err]} {
|
| 110 |
+
return -code error $err
|
| 111 |
+
}
|
| 112 |
+
if {$arg > $val} {set val $arg}
|
| 113 |
+
}
|
| 114 |
+
return $val
|
| 115 |
+
}
|
| 116 |
+
namespace export min max
|
| 117 |
+
}
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
# Windows specific end of initialization
|
| 121 |
+
|
| 122 |
+
if {(![interp issafe]) && ($tcl_platform(platform) eq "windows")} {
|
| 123 |
+
namespace eval tcl {
|
| 124 |
+
proc EnvTraceProc {lo n1 n2 op} {
|
| 125 |
+
global env
|
| 126 |
+
set x $env($n2)
|
| 127 |
+
set env($lo) $x
|
| 128 |
+
set env([string toupper $lo]) $x
|
| 129 |
+
}
|
| 130 |
+
proc InitWinEnv {} {
|
| 131 |
+
global env tcl_platform
|
| 132 |
+
foreach p [array names env] {
|
| 133 |
+
set u [string toupper $p]
|
| 134 |
+
if {$u ne $p} {
|
| 135 |
+
switch -- $u {
|
| 136 |
+
COMSPEC -
|
| 137 |
+
PATH {
|
| 138 |
+
set temp $env($p)
|
| 139 |
+
unset env($p)
|
| 140 |
+
set env($u) $temp
|
| 141 |
+
trace add variable env($p) write \
|
| 142 |
+
[namespace code [list EnvTraceProc $p]]
|
| 143 |
+
trace add variable env($u) write \
|
| 144 |
+
[namespace code [list EnvTraceProc $p]]
|
| 145 |
+
}
|
| 146 |
+
}
|
| 147 |
+
}
|
| 148 |
+
}
|
| 149 |
+
if {![info exists env(COMSPEC)]} {
|
| 150 |
+
set env(COMSPEC) cmd.exe
|
| 151 |
+
}
|
| 152 |
+
}
|
| 153 |
+
InitWinEnv
|
| 154 |
+
}
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
# Setup the unknown package handler
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
if {[interp issafe]} {
|
| 161 |
+
package unknown {::tcl::tm::UnknownHandler ::tclPkgUnknown}
|
| 162 |
+
} else {
|
| 163 |
+
# Set up search for Tcl Modules (TIP #189).
|
| 164 |
+
# and setup platform specific unknown package handlers
|
| 165 |
+
if {$tcl_platform(os) eq "Darwin"
|
| 166 |
+
&& $tcl_platform(platform) eq "unix"} {
|
| 167 |
+
package unknown {::tcl::tm::UnknownHandler \
|
| 168 |
+
{::tcl::MacOSXPkgUnknown ::tclPkgUnknown}}
|
| 169 |
+
} else {
|
| 170 |
+
package unknown {::tcl::tm::UnknownHandler ::tclPkgUnknown}
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
# Set up the 'clock' ensemble
|
| 174 |
+
|
| 175 |
+
namespace eval ::tcl::clock [list variable TclLibDir $::tcl_library]
|
| 176 |
+
|
| 177 |
+
proc ::tcl::initClock {} {
|
| 178 |
+
# Auto-loading stubs for 'clock.tcl'
|
| 179 |
+
|
| 180 |
+
foreach cmd {add format scan} {
|
| 181 |
+
proc ::tcl::clock::$cmd args {
|
| 182 |
+
variable TclLibDir
|
| 183 |
+
source -encoding utf-8 [file join $TclLibDir clock.tcl]
|
| 184 |
+
return [uplevel 1 [info level 0]]
|
| 185 |
+
}
|
| 186 |
+
}
|
| 187 |
+
|
| 188 |
+
rename ::tcl::initClock {}
|
| 189 |
+
}
|
| 190 |
+
::tcl::initClock
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
# Conditionalize for presence of exec.
|
| 194 |
+
|
| 195 |
+
if {[namespace which -command exec] eq ""} {
|
| 196 |
+
|
| 197 |
+
# Some machines do not have exec. Also, on all
|
| 198 |
+
# platforms, safe interpreters do not have exec.
|
| 199 |
+
|
| 200 |
+
set auto_noexec 1
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
# Define a log command (which can be overwritten to log errors
|
| 204 |
+
# differently, specially when stderr is not available)
|
| 205 |
+
|
| 206 |
+
if {[namespace which -command tclLog] eq ""} {
|
| 207 |
+
proc tclLog {string} {
|
| 208 |
+
catch {puts stderr $string}
|
| 209 |
+
}
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
# unknown --
|
| 213 |
+
# This procedure is called when a Tcl command is invoked that doesn't
|
| 214 |
+
# exist in the interpreter. It takes the following steps to make the
|
| 215 |
+
# command available:
|
| 216 |
+
#
|
| 217 |
+
# 1. See if the autoload facility can locate the command in a
|
| 218 |
+
# Tcl script file. If so, load it and execute it.
|
| 219 |
+
# 2. If the command was invoked interactively at top-level:
|
| 220 |
+
# (a) see if the command exists as an executable UNIX program.
|
| 221 |
+
# If so, "exec" the command.
|
| 222 |
+
# (b) see if the command requests csh-like history substitution
|
| 223 |
+
# in one of the common forms !!, !<number>, or ^old^new. If
|
| 224 |
+
# so, emulate csh's history substitution.
|
| 225 |
+
# (c) see if the command is a unique abbreviation for another
|
| 226 |
+
# command. If so, invoke the command.
|
| 227 |
+
#
|
| 228 |
+
# Arguments:
|
| 229 |
+
# args - A list whose elements are the words of the original
|
| 230 |
+
# command, including the command name.
|
| 231 |
+
|
| 232 |
+
proc unknown args {
|
| 233 |
+
variable ::tcl::UnknownPending
|
| 234 |
+
global auto_noexec auto_noload env tcl_interactive errorInfo errorCode
|
| 235 |
+
|
| 236 |
+
if {[info exists errorInfo]} {
|
| 237 |
+
set savedErrorInfo $errorInfo
|
| 238 |
+
}
|
| 239 |
+
if {[info exists errorCode]} {
|
| 240 |
+
set savedErrorCode $errorCode
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
set name [lindex $args 0]
|
| 244 |
+
if {![info exists auto_noload]} {
|
| 245 |
+
#
|
| 246 |
+
# Make sure we're not trying to load the same proc twice.
|
| 247 |
+
#
|
| 248 |
+
if {[info exists UnknownPending($name)]} {
|
| 249 |
+
return -code error "self-referential recursion\
|
| 250 |
+
in \"unknown\" for command \"$name\""
|
| 251 |
+
}
|
| 252 |
+
set UnknownPending($name) pending
|
| 253 |
+
set ret [catch {
|
| 254 |
+
auto_load $name [uplevel 1 {::namespace current}]
|
| 255 |
+
} msg opts]
|
| 256 |
+
unset UnknownPending($name)
|
| 257 |
+
if {$ret != 0} {
|
| 258 |
+
dict append opts -errorinfo "\n (autoloading \"$name\")"
|
| 259 |
+
return -options $opts $msg
|
| 260 |
+
}
|
| 261 |
+
if {![array size UnknownPending]} {
|
| 262 |
+
unset UnknownPending
|
| 263 |
+
}
|
| 264 |
+
if {$msg} {
|
| 265 |
+
if {[info exists savedErrorCode]} {
|
| 266 |
+
set ::errorCode $savedErrorCode
|
| 267 |
+
} else {
|
| 268 |
+
unset -nocomplain ::errorCode
|
| 269 |
+
}
|
| 270 |
+
if {[info exists savedErrorInfo]} {
|
| 271 |
+
set errorInfo $savedErrorInfo
|
| 272 |
+
} else {
|
| 273 |
+
unset -nocomplain errorInfo
|
| 274 |
+
}
|
| 275 |
+
set code [catch {uplevel 1 $args} msg opts]
|
| 276 |
+
if {$code == 1} {
|
| 277 |
+
#
|
| 278 |
+
# Compute stack trace contribution from the [uplevel].
|
| 279 |
+
# Note the dependence on how Tcl_AddErrorInfo, etc.
|
| 280 |
+
# construct the stack trace.
|
| 281 |
+
#
|
| 282 |
+
set errInfo [dict get $opts -errorinfo]
|
| 283 |
+
set errCode [dict get $opts -errorcode]
|
| 284 |
+
set cinfo $args
|
| 285 |
+
if {[string bytelength $cinfo] > 150} {
|
| 286 |
+
set cinfo [string range $cinfo 0 150]
|
| 287 |
+
while {[string bytelength $cinfo] > 150} {
|
| 288 |
+
set cinfo [string range $cinfo 0 end-1]
|
| 289 |
+
}
|
| 290 |
+
append cinfo ...
|
| 291 |
+
}
|
| 292 |
+
set tail "\n (\"uplevel\" body line 1)\n invoked\
|
| 293 |
+
from within\n\"uplevel 1 \$args\""
|
| 294 |
+
set expect "$msg\n while executing\n\"$cinfo\"$tail"
|
| 295 |
+
if {$errInfo eq $expect} {
|
| 296 |
+
#
|
| 297 |
+
# The stack has only the eval from the expanded command
|
| 298 |
+
# Do not generate any stack trace here.
|
| 299 |
+
#
|
| 300 |
+
dict unset opts -errorinfo
|
| 301 |
+
dict incr opts -level
|
| 302 |
+
return -options $opts $msg
|
| 303 |
+
}
|
| 304 |
+
#
|
| 305 |
+
# Stack trace is nested, trim off just the contribution
|
| 306 |
+
# from the extra "eval" of $args due to the "catch" above.
|
| 307 |
+
#
|
| 308 |
+
set last [string last $tail $errInfo]
|
| 309 |
+
if {$last + [string length $tail] != [string length $errInfo]} {
|
| 310 |
+
# Very likely cannot happen
|
| 311 |
+
return -options $opts $msg
|
| 312 |
+
}
|
| 313 |
+
set errInfo [string range $errInfo 0 $last-1]
|
| 314 |
+
set tail "\"$cinfo\""
|
| 315 |
+
set last [string last $tail $errInfo]
|
| 316 |
+
if {$last < 0 || $last + [string length $tail] != [string length $errInfo]} {
|
| 317 |
+
return -code error -errorcode $errCode \
|
| 318 |
+
-errorinfo $errInfo $msg
|
| 319 |
+
}
|
| 320 |
+
set errInfo [string range $errInfo 0 $last-1]
|
| 321 |
+
set tail "\n invoked from within\n"
|
| 322 |
+
set last [string last $tail $errInfo]
|
| 323 |
+
if {$last + [string length $tail] == [string length $errInfo]} {
|
| 324 |
+
return -code error -errorcode $errCode \
|
| 325 |
+
-errorinfo [string range $errInfo 0 $last-1] $msg
|
| 326 |
+
}
|
| 327 |
+
set tail "\n while executing\n"
|
| 328 |
+
set last [string last $tail $errInfo]
|
| 329 |
+
if {$last + [string length $tail] == [string length $errInfo]} {
|
| 330 |
+
return -code error -errorcode $errCode \
|
| 331 |
+
-errorinfo [string range $errInfo 0 $last-1] $msg
|
| 332 |
+
}
|
| 333 |
+
return -options $opts $msg
|
| 334 |
+
} else {
|
| 335 |
+
dict incr opts -level
|
| 336 |
+
return -options $opts $msg
|
| 337 |
+
}
|
| 338 |
+
}
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
if {([info level] == 1) && ([info script] eq "")
|
| 342 |
+
&& [info exists tcl_interactive] && $tcl_interactive} {
|
| 343 |
+
if {![info exists auto_noexec]} {
|
| 344 |
+
set new [auto_execok $name]
|
| 345 |
+
if {$new ne ""} {
|
| 346 |
+
set redir ""
|
| 347 |
+
if {[namespace which -command console] eq ""} {
|
| 348 |
+
set redir ">&@stdout <@stdin"
|
| 349 |
+
}
|
| 350 |
+
uplevel 1 [list ::catch \
|
| 351 |
+
[concat exec $redir $new [lrange $args 1 end]] \
|
| 352 |
+
::tcl::UnknownResult ::tcl::UnknownOptions]
|
| 353 |
+
dict incr ::tcl::UnknownOptions -level
|
| 354 |
+
return -options $::tcl::UnknownOptions $::tcl::UnknownResult
|
| 355 |
+
}
|
| 356 |
+
}
|
| 357 |
+
if {$name eq "!!"} {
|
| 358 |
+
set newcmd [history event]
|
| 359 |
+
} elseif {[regexp {^!(.+)$} $name -> event]} {
|
| 360 |
+
set newcmd [history event $event]
|
| 361 |
+
} elseif {[regexp {^\^([^^]*)\^([^^]*)\^?$} $name -> old new]} {
|
| 362 |
+
set newcmd [history event -1]
|
| 363 |
+
catch {regsub -all -- $old $newcmd $new newcmd}
|
| 364 |
+
}
|
| 365 |
+
if {[info exists newcmd]} {
|
| 366 |
+
tclLog $newcmd
|
| 367 |
+
history change $newcmd 0
|
| 368 |
+
uplevel 1 [list ::catch $newcmd \
|
| 369 |
+
::tcl::UnknownResult ::tcl::UnknownOptions]
|
| 370 |
+
dict incr ::tcl::UnknownOptions -level
|
| 371 |
+
return -options $::tcl::UnknownOptions $::tcl::UnknownResult
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
set ret [catch [list uplevel 1 [list info commands $name*]] candidates]
|
| 375 |
+
if {$name eq "::"} {
|
| 376 |
+
set name ""
|
| 377 |
+
}
|
| 378 |
+
if {$ret != 0} {
|
| 379 |
+
dict append opts -errorinfo \
|
| 380 |
+
"\n (expanding command prefix \"$name\" in unknown)"
|
| 381 |
+
return -options $opts $candidates
|
| 382 |
+
}
|
| 383 |
+
# Filter out bogus matches when $name contained
|
| 384 |
+
# a glob-special char [Bug 946952]
|
| 385 |
+
if {$name eq ""} {
|
| 386 |
+
# Handle empty $name separately due to strangeness
|
| 387 |
+
# in [string first] (See RFE 1243354)
|
| 388 |
+
set cmds $candidates
|
| 389 |
+
} else {
|
| 390 |
+
set cmds [list]
|
| 391 |
+
foreach x $candidates {
|
| 392 |
+
if {[string first $name $x] == 0} {
|
| 393 |
+
lappend cmds $x
|
| 394 |
+
}
|
| 395 |
+
}
|
| 396 |
+
}
|
| 397 |
+
if {[llength $cmds] == 1} {
|
| 398 |
+
uplevel 1 [list ::catch [lreplace $args 0 0 [lindex $cmds 0]] \
|
| 399 |
+
::tcl::UnknownResult ::tcl::UnknownOptions]
|
| 400 |
+
dict incr ::tcl::UnknownOptions -level
|
| 401 |
+
return -options $::tcl::UnknownOptions $::tcl::UnknownResult
|
| 402 |
+
}
|
| 403 |
+
if {[llength $cmds]} {
|
| 404 |
+
return -code error "ambiguous command name \"$name\": [lsort $cmds]"
|
| 405 |
+
}
|
| 406 |
+
}
|
| 407 |
+
return -code error -errorcode [list TCL LOOKUP COMMAND $name] \
|
| 408 |
+
"invalid command name \"$name\""
|
| 409 |
+
}
|
| 410 |
+
|
| 411 |
+
# auto_load --
|
| 412 |
+
# Checks a collection of library directories to see if a procedure
|
| 413 |
+
# is defined in one of them. If so, it sources the appropriate
|
| 414 |
+
# library file to create the procedure. Returns 1 if it successfully
|
| 415 |
+
# loaded the procedure, 0 otherwise.
|
| 416 |
+
#
|
| 417 |
+
# Arguments:
|
| 418 |
+
# cmd - Name of the command to find and load.
|
| 419 |
+
# namespace (optional) The namespace where the command is being used - must be
|
| 420 |
+
# a canonical namespace as returned [namespace current]
|
| 421 |
+
# for instance. If not given, namespace current is used.
|
| 422 |
+
|
| 423 |
+
proc auto_load {cmd {namespace {}}} {
    # Try to load the definition of $cmd by running the script recorded
    # for it in the global auto_index array; if no entry exists yet,
    # (re)build the index from the tclIndex files on auto_path and retry.
    # Returns 1 when the command exists afterwards, 0 otherwise.
    global auto_index auto_path

    if {$namespace eq ""} {
        set namespace [uplevel 1 [list ::namespace current]]
    }
    set nameList [auto_qualify $cmd $namespace]
    # workaround non canonical auto_index entries that might be around
    # from older auto_mkindex versions
    lappend nameList $cmd
    foreach name $nameList {
        if {[info exists auto_index($name)]} {
            # The index value is a script (usually "source ..."); run it
            # in the global namespace so the proc lands where indexed.
            namespace eval :: $auto_index($name)
            # There's a couple of ways to look for a command of a given
            # name.  One is to use
            #    info commands $name
            # Unfortunately, if the name has glob-magic chars in it like *
            # or [], it may not match.  For our purposes here, a better
            # route is to use
            #    namespace which -command $name
            if {[namespace which -command $name] ne ""} {
                return 1
            }
        }
    }
    if {![info exists auto_path]} {
        return 0
    }

    # auto_load_index returns 0 when the index is already current, in
    # which case retrying the lookup below would be pointless.
    if {![auto_load_index]} {
        return 0
    }
    foreach name $nameList {
        if {[info exists auto_index($name)]} {
            namespace eval :: $auto_index($name)
            if {[namespace which -command $name] ne ""} {
                return 1
            }
        }
    }
    return 0
}
|
| 465 |
+
|
| 466 |
+
# auto_load_index --
|
| 467 |
+
# Loads the contents of tclIndex files on the auto_path directory
|
| 468 |
+
# list. This is usually invoked within auto_load to load the index
|
| 469 |
+
# of available commands. Returns 1 if the index is loaded, and 0 if
|
| 470 |
+
# the index is already loaded and up to date.
|
| 471 |
+
#
|
| 472 |
+
# Arguments:
|
| 473 |
+
# None.
|
| 474 |
+
|
| 475 |
+
proc auto_load_index {} {
    # Populate the global auto_index array from the tclIndex file in each
    # directory on auto_path.  Returns 1 if (re)loaded, 0 if the index is
    # already up to date for the current auto_path value.
    variable ::tcl::auto_oldpath
    global auto_index auto_path

    # Cache check: skip the whole scan when auto_path is unchanged since
    # the last load.
    if {[info exists auto_oldpath] && ($auto_oldpath eq $auto_path)} {
        return 0
    }
    set auto_oldpath $auto_path

    # Check if we are a safe interpreter. In that case, we support only
    # newer format tclIndex files.

    set issafe [interp issafe]
    # Walk auto_path back-to-front so that earlier directories overwrite
    # entries from later ones (higher priority wins).
    for {set i [expr {[llength $auto_path] - 1}]} {$i >= 0} {incr i -1} {
        set dir [lindex $auto_path $i]
        set f ""
        if {$issafe} {
            # Safe interps cannot [open]; rely on the (restricted) source.
            catch {source [file join $dir tclIndex]}
        } elseif {[catch {set f [open [file join $dir tclIndex]]}]} {
            continue
        } else {
            set error [catch {
                fconfigure $f -eofchar "\x1A {}"
                set id [gets $f]
                if {$id eq "# Tcl autoload index file, version 2.0"} {
                    # v2 format: the remainder of the file is a Tcl script
                    # that sets auto_index entries directly.
                    eval [read $f]
                } elseif {$id eq "# Tcl autoload index file: each line identifies a Tcl"} {
                    # Old line-oriented format: "name file" pairs.
                    while {[gets $f line] >= 0} {
                        if {([string index $line 0] eq "#") \
                                || ([llength $line] != 2)} {
                            continue
                        }
                        set name [lindex $line 0]
                        set auto_index($name) \
                                "source [file join $dir [lindex $line 1]]"
                    }
                } else {
                    error "[file join $dir tclIndex] isn't a proper Tcl index file"
                }
            } msg opts]
            # Always close the channel before propagating any error.
            if {$f ne ""} {
                close $f
            }
            if {$error} {
                return -options $opts $msg
            }
        }
    }
    return 1
}
|
| 525 |
+
|
| 526 |
+
# auto_qualify --
|
| 527 |
+
#
|
| 528 |
+
# Compute a fully qualified names list for use in the auto_index array.
|
| 529 |
+
# For historical reasons, commands in the global namespace do not have leading
|
| 530 |
+
# :: in the index key. The list has two elements when the command name is
|
| 531 |
+
# relative (no leading ::) and the namespace is not the global one. Otherwise
|
| 532 |
+
# only one name is returned (and searched in the auto_index).
|
| 533 |
+
#
|
| 534 |
+
# Arguments -
|
| 535 |
+
# cmd The command name. Can be any name accepted for command
|
| 536 |
+
# invocations (Like "foo::::bar").
|
| 537 |
+
# namespace The namespace where the command is being used - must be
|
| 538 |
+
# a canonical namespace as returned by [namespace current]
|
| 539 |
+
# for instance.
|
| 540 |
+
|
| 541 |
+
proc auto_qualify {cmd namespace} {
    # Return the list of fully qualified candidate names under which $cmd
    # may appear in the auto_index array.  Global-namespace commands are
    # indexed WITHOUT the leading :: for historical reasons.

    # count separators and clean them up
    # (making sure that foo:::::bar will be treated as foo::bar)
    set n [regsub -all {::+} $cmd :: cmd]

    # Ignore namespace if the name starts with ::
    # Handle special case of only leading ::

    # Before each return case we give an example of which category it is
    # with the following form :
    # (inputCmd, inputNameSpace) -> output

    if {[string match ::* $cmd]} {
        if {$n > 1} {
            # (::foo::bar , *) -> ::foo::bar
            return [list $cmd]
        } else {
            # (::global , *) -> global
            return [list [string range $cmd 2 end]]
        }
    }

    # Potentially returning 2 elements to try  :
    # (if the current namespace is not the global one)

    if {$n == 0} {
        if {$namespace eq "::"} {
            # (nocolons , ::) -> nocolons
            return [list $cmd]
        } else {
            # (nocolons , ::sub) -> ::sub::nocolons nocolons
            return [list ${namespace}::$cmd $cmd]
        }
    } elseif {$namespace eq "::"} {
        #  (foo::bar , ::) -> ::foo::bar
        return [list ::$cmd]
    } else {
        # (foo::bar , ::sub) -> ::sub::foo::bar ::foo::bar
        return [list ${namespace}::$cmd ::$cmd]
    }
}
|
| 583 |
+
|
| 584 |
+
# auto_import --
|
| 585 |
+
#
|
| 586 |
+
# Invoked during "namespace import" to make see if the imported commands
|
| 587 |
+
# reside in an autoloaded library. If so, the commands are loaded so
|
| 588 |
+
# that they will be available for the import links. If not, then this
|
| 589 |
+
# procedure does nothing.
|
| 590 |
+
#
|
| 591 |
+
# Arguments -
|
| 592 |
+
# pattern The pattern of commands being imported (like "foo::*")
|
| 593 |
+
# a canonical namespace as returned by [namespace current]
|
| 594 |
+
|
| 595 |
+
proc auto_import {pattern} {
    # Called by [namespace import]: autoload any indexed commands that
    # match $pattern so the import links can be created.  Does nothing
    # when the pattern names no namespace.
    global auto_index

    # If no namespace is specified, this will be an error case

    if {![string match *::* $pattern]} {
        return
    }

    set ns [uplevel 1 [list ::namespace current]]
    set patternList [auto_qualify $pattern $ns]

    # Make sure the index reflects the current auto_path before matching.
    auto_load_index

    foreach pattern $patternList {
        foreach name [array names auto_index $pattern] {
            # Only load commands that don't exist yet and whose namespace
            # actually matches the pattern's namespace (array names does
            # glob matching over the whole name).
            if {([namespace which -command $name] eq "")
                    && ([namespace qualifiers $pattern] eq [namespace qualifiers $name])} {
                namespace eval :: $auto_index($name)
            }
        }
    }
}
|
| 618 |
+
|
| 619 |
+
# auto_execok --
|
| 620 |
+
#
|
| 621 |
+
# Returns string that indicates name of program to execute if
|
| 622 |
+
# name corresponds to a shell builtin or an executable in the
|
| 623 |
+
# Windows search path, or "" otherwise. Builds an associative
|
| 624 |
+
# array auto_execs that caches information about previous checks,
|
| 625 |
+
# for speed.
|
| 626 |
+
#
|
| 627 |
+
# Arguments:
|
| 628 |
+
# name - Name of a command.
|
| 629 |
+
|
| 630 |
+
if {$tcl_platform(platform) eq "windows"} {
    # Windows version.
    #
    # Note that file executable doesn't work under Windows, so we have to
    # look for files with .exe, .com, or .bat extensions.  Also, the path
    # may be in the Path or PATH environment variables, and path
    # components are separated with semicolons, not colons as under Unix.
    #
    proc auto_execok name {
        # Resolve $name to an exec-able command list, caching the result
        # (including negative results, as "") in auto_execs.
        global auto_execs env tcl_platform

        if {[info exists auto_execs($name)]} {
            return $auto_execs($name)
        }
        set auto_execs($name) ""

        set shellBuiltins [list assoc cls copy date del dir echo erase exit ftype \
                md mkdir mklink move rd ren rename rmdir start time type ver vol]
        if {[info exists env(PATHEXT)]} {
            # Add an initial ; to have the {} extension check first.
            set execExtensions [split ";$env(PATHEXT)" ";"]
        } else {
            set execExtensions [list {} .com .exe .bat .cmd]
        }

        if {[string tolower $name] in $shellBuiltins} {
            # When this is command.com for some reason on Win2K, Tcl won't
            # exec it unless the case is right, which this corrects. COMSPEC
            # may not point to a real file, so do the check.
            set cmd $env(COMSPEC)
            if {[file exists $cmd]} {
                set cmd [file attributes $cmd -shortname]
            }
            return [set auto_execs($name) [list $cmd /c $name]]
        }

        # A multi-component name is tried as-is (relative/absolute path),
        # with each extension appended; the PATH search is skipped.
        if {[llength [file split $name]] != 1} {
            foreach ext $execExtensions {
                set file ${name}${ext}
                if {[file exists $file] && ![file isdirectory $file]} {
                    return [set auto_execs($name) [list $file]]
                }
            }
            return ""
        }

        # Build the search path: executable's dir, ".", system dirs,
        # then the PATH-style environment variables.
        set path "[file dirname [info nameof]];.;"
        if {[info exists env(SystemRoot)]} {
            set windir $env(SystemRoot)
        } elseif {[info exists env(WINDIR)]} {
            set windir $env(WINDIR)
        }
        if {[info exists windir]} {
            if {$tcl_platform(os) eq "Windows NT"} {
                append path "$windir/system32;"
            }
            append path "$windir/system;$windir;"
        }

        foreach var {PATH Path path} {
            if {[info exists env($var)]} {
                append path ";$env($var)"
            }
        }

        foreach ext $execExtensions {
            unset -nocomplain checked
            foreach dir [split $path {;}] {
                # Skip already checked directories
                if {[info exists checked($dir)] || ($dir eq "")} {
                    continue
                }
                set checked($dir) {}
                set file [file join $dir ${name}${ext}]
                if {[file exists $file] && ![file isdirectory $file]} {
                    return [set auto_execs($name) [list $file]]
                }
            }
        }
        return ""
    }

} else {
    # Unix version.
    #
    proc auto_execok name {
        # Resolve $name to an executable file list via $PATH, caching the
        # result (including negative results, as "") in auto_execs.
        global auto_execs env

        if {[info exists auto_execs($name)]} {
            return $auto_execs($name)
        }
        set auto_execs($name) ""
        # A multi-component name is checked directly, bypassing PATH.
        if {[llength [file split $name]] != 1} {
            if {[file executable $name] && ![file isdirectory $name]} {
                set auto_execs($name) [list $name]
            }
            return $auto_execs($name)
        }
        foreach dir [split $env(PATH) :] {
            # An empty PATH component traditionally means ".".
            if {$dir eq ""} {
                set dir .
            }
            set file [file join $dir $name]
            if {[file executable $file] && ![file isdirectory $file]} {
                set auto_execs($name) [list $file]
                return $auto_execs($name)
            }
        }
        return ""
    }

}
|
| 742 |
+
|
| 743 |
+
# ::tcl::CopyDirectory --
|
| 744 |
+
#
|
| 745 |
+
# This procedure is called by Tcl's core when attempts to call the
|
| 746 |
+
# filesystem's copydirectory function fail. The semantics of the call
|
| 747 |
+
# are that 'dest' does not yet exist, i.e. dest should become the exact
|
| 748 |
+
# image of src. If dest does exist, we throw an error.
|
| 749 |
+
#
|
| 750 |
+
# Note that making changes to this procedure can change the results
|
| 751 |
+
# of running Tcl's tests.
|
| 752 |
+
#
|
| 753 |
+
# Arguments:
|
| 754 |
+
# action - "renaming" or "copying"
|
| 755 |
+
# src - source directory
|
| 756 |
+
# dest - destination directory
|
| 757 |
+
proc tcl::CopyDirectory {action src dest} {
    # Fallback used by the Tcl core when a filesystem's own copydirectory
    # fails.  $action is "renaming" or "copying"; makes $dest an exact
    # image of $src, erroring when that is not permissible.
    set nsrc [file normalize $src]
    set ndest [file normalize $dest]

    if {$action eq "renaming"} {
        # Can't rename volumes.  We could give a more precise
        # error message here, but that would break the test suite.
        if {$nsrc in [file volumes]} {
            return -code error "error $action \"$src\" to\
              \"$dest\": trying to rename a volume or move a directory\
              into itself"
        }
    }
    if {[file exists $dest]} {
        if {$nsrc eq $ndest} {
            return -code error "error $action \"$src\" to\
              \"$dest\": trying to rename a volume or move a directory\
              into itself"
        }
        if {$action eq "copying"} {
            # We used to throw an error here, but, looking more closely
            # at the core copy code in tclFCmd.c, if the destination
            # exists, then we should only call this function if -force
            # is true, which means we just want to over-write.  So,
            # the following code is now commented out.
            #
            # return -code error "error $action \"$src\" to\
            # \"$dest\": file already exists"
        } else {
            # Depending on the platform, and on the current
            # working directory, the directories '.', '..'
            # can be returned in various combinations.  Anyway,
            # if any other file is returned, we must signal an error.
            set existing [glob -nocomplain -directory $dest * .*]
            lappend existing {*}[glob -nocomplain -directory $dest \
                    -type hidden * .*]
            foreach s $existing {
                if {[file tail $s] ni {. ..}} {
                    return -code error "error $action \"$src\" to\
                      \"$dest\": file already exists"
                }
            }
        }
    } else {
        # Dest does not exist: guard against moving a directory into
        # itself (dest nested directly under src), then create it.
        if {[string first $nsrc $ndest] >= 0} {
            set srclen [expr {[llength [file split $nsrc]] - 1}]
            set ndest [lindex [file split $ndest] $srclen]
            if {$ndest eq [file tail $nsrc]} {
                return -code error "error $action \"$src\" to\
                  \"$dest\": trying to rename a volume or move a directory\
                  into itself"
            }
        }
        file mkdir $dest
    }
    # Have to be careful to capture both visible and hidden files.
    # We will also be more generous to the file system and not
    # assume the hidden and non-hidden lists are non-overlapping.
    #
    # On Unix 'hidden' files begin with '.'.  On other platforms
    # or filesystems hidden files may have other interpretations.
    set filelist [concat [glob -nocomplain -directory $src *] \
            [glob -nocomplain -directory $src -types hidden *]]

    foreach s [lsort -unique $filelist] {
        if {[file tail $s] ni {. ..}} {
            file copy -force -- $s [file join $dest [file tail $s]]
        }
    }
    return
}
|
llava/lib/tcl8.6/parray.tcl
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# parray:
|
| 2 |
+
# Print the contents of a global array on stdout.
|
| 3 |
+
#
|
| 4 |
+
# Copyright (c) 1991-1993 The Regents of the University of California.
|
| 5 |
+
# Copyright (c) 1994 Sun Microsystems, Inc.
|
| 6 |
+
#
|
| 7 |
+
# See the file "license.terms" for information on usage and redistribution
|
| 8 |
+
# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
|
| 9 |
+
#
|
| 10 |
+
|
| 11 |
+
proc parray {a {pattern *}} {
    # Print every element of the array named $a (in the caller's scope)
    # whose element name matches $pattern, one "a(name) = value" line per
    # element on stdout, with the "=" signs vertically aligned.
    upvar 1 $a arr
    if {![array exists arr]} {
        return -code error "\"$a\" isn't an array"
    }
    set keys [lsort [array names arr $pattern]]
    # Widest element name determines the alignment column.
    set widest 0
    foreach key $keys {
        set len [string length $key]
        if {$len > $widest} {
            set widest $len
        }
    }
    # Account for the array name plus the surrounding parentheses.
    set widest [expr {$widest + [string length $a] + 2}]
    foreach key $keys {
        puts stdout [format "%-*s = %s" $widest [format %s(%s) $a $key] $arr($key)]
    }
}
|
llava/lib/tcl8.6/tclAppInit.c
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*
|
| 2 |
+
* tclAppInit.c --
|
| 3 |
+
*
|
| 4 |
+
* Provides a default version of the main program and Tcl_AppInit
|
| 5 |
+
* procedure for tclsh and other Tcl-based applications (without Tk).
|
| 6 |
+
*
|
| 7 |
+
* Copyright (c) 1993 The Regents of the University of California.
|
| 8 |
+
* Copyright (c) 1994-1997 Sun Microsystems, Inc.
|
| 9 |
+
* Copyright (c) 1998-1999 Scriptics Corporation.
|
| 10 |
+
*
|
| 11 |
+
* See the file "license.terms" for information on usage and redistribution of
|
| 12 |
+
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
|
| 13 |
+
*/
|
| 14 |
+
|
| 15 |
+
#undef BUILD_tcl
|
| 16 |
+
#undef STATIC_BUILD
|
| 17 |
+
#include "tcl.h"
|
| 18 |
+
#if TCL_MAJOR_VERSION < 9 && TCL_MINOR_VERSION < 7
|
| 19 |
+
# define Tcl_LibraryInitProc Tcl_PackageInitProc
|
| 20 |
+
# define Tcl_StaticLibrary Tcl_StaticPackage
|
| 21 |
+
#endif
|
| 22 |
+
|
| 23 |
+
#ifdef TCL_TEST
|
| 24 |
+
extern Tcl_LibraryInitProc Tcltest_Init;
|
| 25 |
+
extern Tcl_LibraryInitProc Tcltest_SafeInit;
|
| 26 |
+
#endif /* TCL_TEST */
|
| 27 |
+
|
| 28 |
+
#ifdef TCL_XT_TEST
|
| 29 |
+
extern void XtToolkitInitialize(void);
|
| 30 |
+
extern Tcl_LibraryInitProc Tclxttest_Init;
|
| 31 |
+
#endif /* TCL_XT_TEST */
|
| 32 |
+
|
| 33 |
+
/*
|
| 34 |
+
* The following #if block allows you to change the AppInit function by using
|
| 35 |
+
* a #define of TCL_LOCAL_APPINIT instead of rewriting this entire file. The
|
| 36 |
+
* #if checks for that #define and uses Tcl_AppInit if it does not exist.
|
| 37 |
+
*/
|
| 38 |
+
|
| 39 |
+
#ifndef TCL_LOCAL_APPINIT
|
| 40 |
+
#define TCL_LOCAL_APPINIT Tcl_AppInit
|
| 41 |
+
#endif
|
| 42 |
+
#ifndef MODULE_SCOPE
|
| 43 |
+
# define MODULE_SCOPE extern
|
| 44 |
+
#endif
|
| 45 |
+
MODULE_SCOPE int TCL_LOCAL_APPINIT(Tcl_Interp *);
|
| 46 |
+
MODULE_SCOPE int main(int, char **);
|
| 47 |
+
|
| 48 |
+
/*
|
| 49 |
+
* The following #if block allows you to change how Tcl finds the startup
|
| 50 |
+
* script, prime the library or encoding paths, fiddle with the argv, etc.,
|
| 51 |
+
* without needing to rewrite Tcl_Main()
|
| 52 |
+
*/
|
| 53 |
+
|
| 54 |
+
#ifdef TCL_LOCAL_MAIN_HOOK
|
| 55 |
+
MODULE_SCOPE int TCL_LOCAL_MAIN_HOOK(int *argc, char ***argv);
|
| 56 |
+
#endif
|
| 57 |
+
|
| 58 |
+
/*
|
| 59 |
+
*----------------------------------------------------------------------
|
| 60 |
+
*
|
| 61 |
+
* main --
|
| 62 |
+
*
|
| 63 |
+
* This is the main program for the application.
|
| 64 |
+
*
|
| 65 |
+
* Results:
|
| 66 |
+
* None: Tcl_Main never returns here, so this procedure never returns
|
| 67 |
+
* either.
|
| 68 |
+
*
|
| 69 |
+
* Side effects:
|
| 70 |
+
* Just about anything, since from here we call arbitrary Tcl code.
|
| 71 |
+
*
|
| 72 |
+
*----------------------------------------------------------------------
|
| 73 |
+
*/
|
| 74 |
+
|
| 75 |
+
int
main(
    int argc,			/* Number of command-line arguments. */
    char *argv[])		/* Values of command-line arguments. */
{
#ifdef TCL_XT_TEST
    XtToolkitInitialize();
#endif

#ifdef TCL_LOCAL_MAIN_HOOK
    TCL_LOCAL_MAIN_HOOK(&argc, &argv);
#elif (TCL_MAJOR_VERSION > 8 || TCL_MINOR_VERSION > 6) && (!defined(_WIN32) || defined(UNICODE))
    /* New in Tcl 8.7. This doesn't work on Windows without UNICODE */
    TclZipfs_AppHook(&argc, &argv);
#endif

    /*
     * Tcl_Main runs the event loop / script and exits the process
     * itself, so control normally never returns here.
     */
    Tcl_Main(argc, argv, TCL_LOCAL_APPINIT);
    return 0;			/* Needed only to prevent compiler warning. */
}
|
| 94 |
+
|
| 95 |
+
/*
|
| 96 |
+
*----------------------------------------------------------------------
|
| 97 |
+
*
|
| 98 |
+
* Tcl_AppInit --
|
| 99 |
+
*
|
| 100 |
+
* This procedure performs application-specific initialization. Most
|
| 101 |
+
* applications, especially those that incorporate additional packages,
|
| 102 |
+
* will have their own version of this procedure.
|
| 103 |
+
*
|
| 104 |
+
* Results:
|
| 105 |
+
* Returns a standard Tcl completion code, and leaves an error message in
|
| 106 |
+
* the interp's result if an error occurs.
|
| 107 |
+
*
|
| 108 |
+
* Side effects:
|
| 109 |
+
* Depends on the startup script.
|
| 110 |
+
*
|
| 111 |
+
*----------------------------------------------------------------------
|
| 112 |
+
*/
|
| 113 |
+
|
| 114 |
+
int
Tcl_AppInit(
    Tcl_Interp *interp)		/* Interpreter for application. */
{
    /*
     * Initialize the core Tcl library in this interpreter; the extra
     * parentheses around Tcl_Init defeat any macro of the same name so
     * the real function is called.
     */
    if ((Tcl_Init)(interp) == TCL_ERROR) {
	return TCL_ERROR;
    }

#ifdef TCL_XT_TEST
    if (Tclxttest_Init(interp) == TCL_ERROR) {
	return TCL_ERROR;
    }
#endif

#ifdef TCL_TEST
    if (Tcltest_Init(interp) == TCL_ERROR) {
	return TCL_ERROR;
    }
    Tcl_StaticLibrary(interp, "Tcltest", Tcltest_Init, Tcltest_SafeInit);
#endif /* TCL_TEST */

    /*
     * Call the init procedures for included packages.  Each call should look
     * like this:
     *
     * if (Mod_Init(interp) == TCL_ERROR) {
     *     return TCL_ERROR;
     * }
     *
     * where "Mod" is the name of the module. (Dynamically-loadable packages
     * should have the same entry-point name.)
     */

    /*
     * Call Tcl_CreateCommand for application-specific commands, if they
     * weren't already created by the init procedures called above.
     */

    /*
     * Specify a user-specific startup file to invoke if the application is
     * run interactively.  Typically the startup file is "~/.apprc" where "app"
     * is the name of the application.  If this line is deleted then no
     * user-specific startup file will be run under any conditions.
     */

#ifdef DJGPP
    (Tcl_ObjSetVar2)(interp, Tcl_NewStringObj("tcl_rcFileName", -1), NULL,
	    Tcl_NewStringObj("~/tclsh.rc", -1), TCL_GLOBAL_ONLY);
#else
    (Tcl_ObjSetVar2)(interp, Tcl_NewStringObj("tcl_rcFileName", -1), NULL,
	    Tcl_NewStringObj("~/.tclshrc", -1), TCL_GLOBAL_ONLY);
#endif

    return TCL_OK;
}
|
| 169 |
+
|
| 170 |
+
/*
|
| 171 |
+
* Local Variables:
|
| 172 |
+
* mode: c
|
| 173 |
+
* c-basic-offset: 4
|
| 174 |
+
* fill-column: 78
|
| 175 |
+
* End:
|
| 176 |
+
*/
|
llava/lib/tcl8.6/tclIndex
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Tcl autoload index file, version 2.0
|
| 2 |
+
# -*- tcl -*-
|
| 3 |
+
# This file is generated by the "auto_mkindex" command
|
| 4 |
+
# and sourced to set up indexing information for one or
|
| 5 |
+
# more commands. Typically each line is a command that
|
| 6 |
+
# sets an element in the auto_index array, where the
|
| 7 |
+
# element name is the name of a command and the value is
|
| 8 |
+
# a script that loads the command.
|
| 9 |
+
|
| 10 |
+
set auto_index(auto_reset) [list source [file join $dir auto.tcl]]
|
| 11 |
+
set auto_index(tcl_findLibrary) [list source [file join $dir auto.tcl]]
|
| 12 |
+
set auto_index(auto_mkindex) [list source [file join $dir auto.tcl]]
|
| 13 |
+
set auto_index(auto_mkindex_old) [list source [file join $dir auto.tcl]]
|
| 14 |
+
set auto_index(::auto_mkindex_parser::init) [list source [file join $dir auto.tcl]]
|
| 15 |
+
set auto_index(::auto_mkindex_parser::cleanup) [list source [file join $dir auto.tcl]]
|
| 16 |
+
set auto_index(::auto_mkindex_parser::mkindex) [list source [file join $dir auto.tcl]]
|
| 17 |
+
set auto_index(::auto_mkindex_parser::hook) [list source [file join $dir auto.tcl]]
|
| 18 |
+
set auto_index(::auto_mkindex_parser::slavehook) [list source [file join $dir auto.tcl]]
|
| 19 |
+
set auto_index(::auto_mkindex_parser::command) [list source [file join $dir auto.tcl]]
|
| 20 |
+
set auto_index(::auto_mkindex_parser::commandInit) [list source [file join $dir auto.tcl]]
|
| 21 |
+
set auto_index(::auto_mkindex_parser::fullname) [list source [file join $dir auto.tcl]]
|
| 22 |
+
set auto_index(history) [list source [file join $dir history.tcl]]
|
| 23 |
+
set auto_index(::tcl::history) [list source [file join $dir history.tcl]]
|
| 24 |
+
set auto_index(::tcl::HistAdd) [list source [file join $dir history.tcl]]
|
| 25 |
+
set auto_index(::tcl::HistKeep) [list source [file join $dir history.tcl]]
|
| 26 |
+
set auto_index(::tcl::HistClear) [list source [file join $dir history.tcl]]
|
| 27 |
+
set auto_index(::tcl::HistInfo) [list source [file join $dir history.tcl]]
|
| 28 |
+
set auto_index(::tcl::HistRedo) [list source [file join $dir history.tcl]]
|
| 29 |
+
set auto_index(::tcl::HistIndex) [list source [file join $dir history.tcl]]
|
| 30 |
+
set auto_index(::tcl::HistEvent) [list source [file join $dir history.tcl]]
|
| 31 |
+
set auto_index(::tcl::HistChange) [list source [file join $dir history.tcl]]
|
| 32 |
+
set auto_index(pkg_mkIndex) [list source [file join $dir package.tcl]]
|
| 33 |
+
set auto_index(tclPkgSetup) [list source [file join $dir package.tcl]]
|
| 34 |
+
set auto_index(tclPkgUnknown) [list source [file join $dir package.tcl]]
|
| 35 |
+
set auto_index(::tcl::MacOSXPkgUnknown) [list source [file join $dir package.tcl]]
|
| 36 |
+
set auto_index(::pkg::create) [list source [file join $dir package.tcl]]
|
| 37 |
+
set auto_index(parray) [list source [file join $dir parray.tcl]]
|
| 38 |
+
set auto_index(::safe::InterpStatics) [list source [file join $dir safe.tcl]]
|
| 39 |
+
set auto_index(::safe::InterpNested) [list source [file join $dir safe.tcl]]
|
| 40 |
+
set auto_index(::safe::interpCreate) [list source [file join $dir safe.tcl]]
|
| 41 |
+
set auto_index(::safe::interpInit) [list source [file join $dir safe.tcl]]
|
| 42 |
+
set auto_index(::safe::CheckInterp) [list source [file join $dir safe.tcl]]
|
| 43 |
+
set auto_index(::safe::interpConfigure) [list source [file join $dir safe.tcl]]
|
| 44 |
+
set auto_index(::safe::InterpCreate) [list source [file join $dir safe.tcl]]
|
| 45 |
+
set auto_index(::safe::InterpSetConfig) [list source [file join $dir safe.tcl]]
|
| 46 |
+
set auto_index(::safe::interpFindInAccessPath) [list source [file join $dir safe.tcl]]
|
| 47 |
+
set auto_index(::safe::interpAddToAccessPath) [list source [file join $dir safe.tcl]]
|
| 48 |
+
set auto_index(::safe::InterpInit) [list source [file join $dir safe.tcl]]
|
| 49 |
+
set auto_index(::safe::AddSubDirs) [list source [file join $dir safe.tcl]]
|
| 50 |
+
set auto_index(::safe::interpDelete) [list source [file join $dir safe.tcl]]
|
| 51 |
+
set auto_index(::safe::setLogCmd) [list source [file join $dir safe.tcl]]
|
| 52 |
+
set auto_index(::safe::SyncAccessPath) [list source [file join $dir safe.tcl]]
|
| 53 |
+
set auto_index(::safe::PathToken) [list source [file join $dir safe.tcl]]
|
| 54 |
+
set auto_index(::safe::TranslatePath) [list source [file join $dir safe.tcl]]
|
| 55 |
+
set auto_index(::safe::Log) [list source [file join $dir safe.tcl]]
|
| 56 |
+
set auto_index(::safe::CheckFileName) [list source [file join $dir safe.tcl]]
|
| 57 |
+
set auto_index(::safe::AliasGlob) [list source [file join $dir safe.tcl]]
|
| 58 |
+
set auto_index(::safe::AliasSource) [list source [file join $dir safe.tcl]]
|
| 59 |
+
set auto_index(::safe::AliasLoad) [list source [file join $dir safe.tcl]]
|
| 60 |
+
set auto_index(::safe::FileInAccessPath) [list source [file join $dir safe.tcl]]
|
| 61 |
+
set auto_index(::safe::DirInAccessPath) [list source [file join $dir safe.tcl]]
|
| 62 |
+
set auto_index(::safe::Subset) [list source [file join $dir safe.tcl]]
|
| 63 |
+
set auto_index(::safe::AliasSubset) [list source [file join $dir safe.tcl]]
|
| 64 |
+
set auto_index(::safe::AliasEncoding) [list source [file join $dir safe.tcl]]
|
| 65 |
+
set auto_index(tcl_wordBreakAfter) [list source [file join $dir word.tcl]]
|
| 66 |
+
set auto_index(tcl_wordBreakBefore) [list source [file join $dir word.tcl]]
|
| 67 |
+
set auto_index(tcl_endOfWord) [list source [file join $dir word.tcl]]
|
| 68 |
+
set auto_index(tcl_startOfNextWord) [list source [file join $dir word.tcl]]
|
| 69 |
+
set auto_index(tcl_startOfPreviousWord) [list source [file join $dir word.tcl]]
|
| 70 |
+
set auto_index(::tcl::tm::add) [list source [file join $dir tm.tcl]]
|
| 71 |
+
set auto_index(::tcl::tm::remove) [list source [file join $dir tm.tcl]]
|
| 72 |
+
set auto_index(::tcl::tm::list) [list source [file join $dir tm.tcl]]
|
| 73 |
+
set auto_index(::tcl::tm::Defaults) [list source [file join $dir tm.tcl]]
|
| 74 |
+
set auto_index(::tcl::tm::UnknownHandler) [list source [file join $dir tm.tcl]]
|
| 75 |
+
set auto_index(::tcl::tm::roots) [list source [file join $dir tm.tcl]]
|
| 76 |
+
set auto_index(::tcl::tm::path) [list source [file join $dir tm.tcl]]
|
| 77 |
+
if {[namespace exists ::tcl::unsupported]} {
|
| 78 |
+
set auto_index(timerate) {namespace import ::tcl::unsupported::timerate}
|
| 79 |
+
}
|
llava/lib/tcl8.6/tm.tcl
ADDED
|
@@ -0,0 +1,380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- tcl -*-
|
| 2 |
+
#
|
| 3 |
+
# Searching for Tcl Modules. Defines a procedure, declares it as the primary
|
| 4 |
+
# command for finding packages, however also uses the former 'package unknown'
|
| 5 |
+
# command as a fallback.
|
| 6 |
+
#
|
| 7 |
+
# Locates all possible packages in a directory via a less restricted glob. The
|
| 8 |
+
# targeted directory is derived from the name of the requested package, i.e.
|
| 9 |
+
# the TM scan will look only at directories which can contain the requested
|
| 10 |
+
# package. It will register all packages it found in the directory so that
|
| 11 |
+
# future requests have a higher chance of being fulfilled by the ifneeded
|
| 12 |
+
# database without having to come to us again.
|
| 13 |
+
#
|
| 14 |
+
# We do not remember where we have been and simply rescan targeted directories
|
| 15 |
+
# when invoked again. The reasoning is this:
|
| 16 |
+
#
|
| 17 |
+
# - The only way we get back to the same directory is if someone is trying to
|
| 18 |
+
# [package require] something that wasn't there on the first scan.
|
| 19 |
+
#
|
| 20 |
+
# Either
|
| 21 |
+
# 1) It is there now: If we rescan, you get it; if not you don't.
|
| 22 |
+
#
|
| 23 |
+
# This covers the possibility that the application asked for a package
|
| 24 |
+
# late, and the package was actually added to the installation after the
|
| 25 |
+
# application was started. It should still be able to find it.
|
| 26 |
+
#
|
| 27 |
+
# 2) It still is not there: Either way, you don't get it, but the rescan
|
| 28 |
+
# takes time. This is however an error case and we don't care that much
|
| 29 |
+
# about it
|
| 30 |
+
#
|
| 31 |
+
# 3) It was there the first time; but for some reason a "package forget" has
|
| 32 |
+
# been run, and "package" doesn't know about it anymore.
|
| 33 |
+
#
|
| 34 |
+
# This can be an indication that the application wishes to reload some
|
| 35 |
+
# functionality. And should work as well.
|
| 36 |
+
#
|
| 37 |
+
# Note that this also strikes a balance between doing a glob targeting a
|
| 38 |
+
# single package, and thus most likely requiring multiple globs of the same
|
| 39 |
+
# directory when the application is asking for many packages, and trying to
|
| 40 |
+
# glob for _everything_ in all subdirectories when looking for a package,
|
| 41 |
+
# which comes with a heavy startup cost.
|
| 42 |
+
#
|
| 43 |
+
# We scan for regular packages only if no satisfying module was found.
|
| 44 |
+
|
| 45 |
+
namespace eval ::tcl::tm {
|
| 46 |
+
# Default paths. None yet.
|
| 47 |
+
|
| 48 |
+
variable paths {}
|
| 49 |
+
|
| 50 |
+
# The regex pattern a file name has to match to make it a Tcl Module.
|
| 51 |
+
|
| 52 |
+
set pkgpattern {^([_[:alpha:]][:_[:alnum:]]*)-([[:digit:]].*)[.]tm$}
|
| 53 |
+
|
| 54 |
+
# Export the public API
|
| 55 |
+
|
| 56 |
+
namespace export path
|
| 57 |
+
namespace ensemble create -command path -subcommands {add remove list}
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
# ::tcl::tm::path implementations --
|
| 61 |
+
#
|
| 62 |
+
# Public API to the module path. See specification.
|
| 63 |
+
#
|
| 64 |
+
# Arguments
|
| 65 |
+
# cmd - The subcommand to execute
|
| 66 |
+
# args - The paths to add/remove. Must not appear querying the
|
| 67 |
+
# path with 'list'.
|
| 68 |
+
#
|
| 69 |
+
# Results
|
| 70 |
+
# No result for subcommands 'add' and 'remove'. A list of paths for
|
| 71 |
+
# 'list'.
|
| 72 |
+
#
|
| 73 |
+
# Side effects
|
| 74 |
+
# The subcommands 'add' and 'remove' manipulate the list of paths to
|
| 75 |
+
# search for Tcl Modules. The subcommand 'list' has no side effects.
|
| 76 |
+
|
| 77 |
+
proc ::tcl::tm::add {args} {
|
| 78 |
+
# PART OF THE ::tcl::tm::path ENSEMBLE
|
| 79 |
+
#
|
| 80 |
+
# The path is added at the head to the list of module paths.
|
| 81 |
+
#
|
| 82 |
+
# The command enforces the restriction that no path may be an ancestor
|
| 83 |
+
# directory of any other path on the list. If the new path violates this
|
| 84 |
+
# restriction an error will be raised.
|
| 85 |
+
#
|
| 86 |
+
# If the path is already present as is no error will be raised and no
|
| 87 |
+
# action will be taken.
|
| 88 |
+
|
| 89 |
+
variable paths
|
| 90 |
+
|
| 91 |
+
# We use a copy of the path as source during validation, and extend it as
|
| 92 |
+
# well. Because we not only have to detect if the new paths are bogus with
|
| 93 |
+
# respect to the existing paths, but also between themselves. Otherwise we
|
| 94 |
+
# can still add bogus paths, by specifying them in a single call. This
|
| 95 |
+
# makes the use of the new paths simpler as well, a trivial assignment of
|
| 96 |
+
# the collected paths to the official state var.
|
| 97 |
+
|
| 98 |
+
set newpaths $paths
|
| 99 |
+
foreach p $args {
|
| 100 |
+
if {$p in $newpaths} {
|
| 101 |
+
# Ignore a path already on the list.
|
| 102 |
+
continue
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
# Search for paths which are subdirectories of the new one. If there
|
| 106 |
+
# are any then the new path violates the restriction about ancestors.
|
| 107 |
+
|
| 108 |
+
set pos [lsearch -glob $newpaths ${p}/*]
|
| 109 |
+
# Cannot use "in", we need the position for the message.
|
| 110 |
+
if {$pos >= 0} {
|
| 111 |
+
return -code error \
|
| 112 |
+
"$p is ancestor of existing module path [lindex $newpaths $pos]."
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
# Now look for existing paths which are ancestors of the new one. This
|
| 116 |
+
# reverse question forces us to loop over the existing paths, as each
|
| 117 |
+
# element is the pattern, not the new path :(
|
| 118 |
+
|
| 119 |
+
foreach ep $newpaths {
|
| 120 |
+
if {[string match ${ep}/* $p]} {
|
| 121 |
+
return -code error \
|
| 122 |
+
"$p is subdirectory of existing module path $ep."
|
| 123 |
+
}
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
set newpaths [linsert $newpaths 0 $p]
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
# The validation of the input is complete and successful, and everything
|
| 130 |
+
# in newpaths is either an old path, or added. We can now extend the
|
| 131 |
+
# official list of paths, a simple assignment is sufficient.
|
| 132 |
+
|
| 133 |
+
set paths $newpaths
|
| 134 |
+
return
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
proc ::tcl::tm::remove {args} {
|
| 138 |
+
# PART OF THE ::tcl::tm::path ENSEMBLE
|
| 139 |
+
#
|
| 140 |
+
# Removes the path from the list of module paths. The command is silently
|
| 141 |
+
# ignored if the path is not on the list.
|
| 142 |
+
|
| 143 |
+
variable paths
|
| 144 |
+
|
| 145 |
+
foreach p $args {
|
| 146 |
+
set pos [lsearch -exact $paths $p]
|
| 147 |
+
if {$pos >= 0} {
|
| 148 |
+
set paths [lreplace $paths $pos $pos]
|
| 149 |
+
}
|
| 150 |
+
}
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
proc ::tcl::tm::list {} {
|
| 154 |
+
# PART OF THE ::tcl::tm::path ENSEMBLE
|
| 155 |
+
|
| 156 |
+
variable paths
|
| 157 |
+
return $paths
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
# ::tcl::tm::UnknownHandler --
|
| 161 |
+
#
|
| 162 |
+
# Unknown handler for Tcl Modules, i.e. packages in module form.
|
| 163 |
+
#
|
| 164 |
+
# Arguments
|
| 165 |
+
# original - Original [package unknown] procedure.
|
| 166 |
+
# name - Name of desired package.
|
| 167 |
+
# version - Version of desired package. Can be the
|
| 168 |
+
# empty string.
|
| 169 |
+
# exact - Either -exact or omitted.
|
| 170 |
+
#
|
| 171 |
+
# Name, version, and exact are used to determine satisfaction. The
|
| 172 |
+
# original is called iff no satisfaction was achieved. The name is also
|
| 173 |
+
# used to compute the directory to target in the search.
|
| 174 |
+
#
|
| 175 |
+
# Results
|
| 176 |
+
# None.
|
| 177 |
+
#
|
| 178 |
+
# Side effects
|
| 179 |
+
# May populate the package ifneeded database with additional provide
|
| 180 |
+
# scripts.
|
| 181 |
+
|
| 182 |
+
proc ::tcl::tm::UnknownHandler {original name args} {
|
| 183 |
+
# Import the list of paths to search for packages in module form.
|
| 184 |
+
# Import the pattern used to check package names in detail.
|
| 185 |
+
|
| 186 |
+
variable paths
|
| 187 |
+
variable pkgpattern
|
| 188 |
+
|
| 189 |
+
# Without paths to search we can do nothing. (Except falling back to the
|
| 190 |
+
# regular search).
|
| 191 |
+
|
| 192 |
+
if {[llength $paths]} {
|
| 193 |
+
set pkgpath [string map {:: /} $name]
|
| 194 |
+
set pkgroot [file dirname $pkgpath]
|
| 195 |
+
if {$pkgroot eq "."} {
|
| 196 |
+
set pkgroot ""
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
# We don't remember a copy of the paths while looping. Tcl Modules are
|
| 200 |
+
# unable to change the list while we are searching for them. This also
|
| 201 |
+
# simplifies the loop, as we cannot get additional directories while
|
| 202 |
+
# iterating over the list. A simple foreach is sufficient.
|
| 203 |
+
|
| 204 |
+
set satisfied 0
|
| 205 |
+
foreach path $paths {
|
| 206 |
+
if {![interp issafe] && ![file exists $path]} {
|
| 207 |
+
continue
|
| 208 |
+
}
|
| 209 |
+
set currentsearchpath [file join $path $pkgroot]
|
| 210 |
+
if {![interp issafe] && ![file exists $currentsearchpath]} {
|
| 211 |
+
continue
|
| 212 |
+
}
|
| 213 |
+
set strip [llength [file split $path]]
|
| 214 |
+
|
| 215 |
+
# Get the module files out of the subdirectories.
|
| 216 |
+
# - Safe Base interpreters have a restricted "glob" command that
|
| 217 |
+
# works in this case.
|
| 218 |
+
# - The "catch" was essential when there was no safe glob and every
|
| 219 |
+
# call in a safe interp failed; it is retained only for corner
|
| 220 |
+
# cases in which the eventual call to glob returns an error.
|
| 221 |
+
|
| 222 |
+
catch {
|
| 223 |
+
# We always look for _all_ possible modules in the current
|
| 224 |
+
# path, to get the max result out of the glob.
|
| 225 |
+
|
| 226 |
+
foreach file [glob -nocomplain -directory $currentsearchpath *.tm] {
|
| 227 |
+
set pkgfilename [join [lrange [file split $file] $strip end] ::]
|
| 228 |
+
|
| 229 |
+
if {![regexp -- $pkgpattern $pkgfilename --> pkgname pkgversion]} {
|
| 230 |
+
# Ignore everything not matching our pattern for
|
| 231 |
+
# package names.
|
| 232 |
+
continue
|
| 233 |
+
}
|
| 234 |
+
try {
|
| 235 |
+
package vcompare $pkgversion 0
|
| 236 |
+
} on error {} {
|
| 237 |
+
# Ignore everything where the version part is not
|
| 238 |
+
# acceptable to "package vcompare".
|
| 239 |
+
continue
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
if {([package ifneeded $pkgname $pkgversion] ne {})
|
| 243 |
+
&& (![interp issafe])
|
| 244 |
+
} {
|
| 245 |
+
# There's already a provide script registered for
|
| 246 |
+
# this version of this package. Since all units of
|
| 247 |
+
# code claiming to be the same version of the same
|
| 248 |
+
# package ought to be identical, just stick with
|
| 249 |
+
# the one we already have.
|
| 250 |
+
# This does not apply to Safe Base interpreters because
|
| 251 |
+
# the token-to-directory mapping may have changed.
|
| 252 |
+
continue
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
# We have found a candidate, generate a "provide script"
|
| 256 |
+
# for it, and remember it. Note that we are using ::list
|
| 257 |
+
# to do this; locally [list] means something else without
|
| 258 |
+
# the namespace specifier.
|
| 259 |
+
|
| 260 |
+
# NOTE. When making changes to the format of the provide
|
| 261 |
+
# command generated below CHECK that the 'LOCATE'
|
| 262 |
+
# procedure in core file 'platform/shell.tcl' still
|
| 263 |
+
# understands it, or, if not, update its implementation
|
| 264 |
+
# appropriately.
|
| 265 |
+
#
|
| 266 |
+
# Right now LOCATE's implementation assumes that the path
|
| 267 |
+
# of the package file is the last element in the list.
|
| 268 |
+
|
| 269 |
+
package ifneeded $pkgname $pkgversion \
|
| 270 |
+
"[::list package provide $pkgname $pkgversion];[::list source -encoding utf-8 $file]"
|
| 271 |
+
|
| 272 |
+
# We abort in this unknown handler only if we got a
|
| 273 |
+
# satisfying candidate for the requested package.
|
| 274 |
+
# Otherwise we still have to fallback to the regular
|
| 275 |
+
# package search to complete the processing.
|
| 276 |
+
|
| 277 |
+
if {($pkgname eq $name)
|
| 278 |
+
&& [package vsatisfies $pkgversion {*}$args]} {
|
| 279 |
+
set satisfied 1
|
| 280 |
+
|
| 281 |
+
# We do not abort the loop, and keep adding provide
|
| 282 |
+
# scripts for every candidate in the directory, just
|
| 283 |
+
# remember to not fall back to the regular search
|
| 284 |
+
# anymore.
|
| 285 |
+
}
|
| 286 |
+
}
|
| 287 |
+
}
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
if {$satisfied} {
|
| 291 |
+
return
|
| 292 |
+
}
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
# Fallback to previous command, if existing. See comment above about
|
| 296 |
+
# ::list...
|
| 297 |
+
|
| 298 |
+
if {[llength $original]} {
|
| 299 |
+
uplevel 1 $original [::linsert $args 0 $name]
|
| 300 |
+
}
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
# ::tcl::tm::Defaults --
|
| 304 |
+
#
|
| 305 |
+
# Determines the default search paths.
|
| 306 |
+
#
|
| 307 |
+
# Arguments
|
| 308 |
+
# None
|
| 309 |
+
#
|
| 310 |
+
# Results
|
| 311 |
+
# None.
|
| 312 |
+
#
|
| 313 |
+
# Side effects
|
| 314 |
+
# May add paths to the list of defaults.
|
| 315 |
+
|
| 316 |
+
proc ::tcl::tm::Defaults {} {
|
| 317 |
+
global env tcl_platform
|
| 318 |
+
|
| 319 |
+
regexp {^(\d+)\.(\d+)} [package provide Tcl] - major minor
|
| 320 |
+
set exe [file normalize [info nameofexecutable]]
|
| 321 |
+
|
| 322 |
+
# Note that we're using [::list], not [list] because [list] means
|
| 323 |
+
# something other than [::list] in this namespace.
|
| 324 |
+
roots [::list \
|
| 325 |
+
[file dirname [info library]] \
|
| 326 |
+
[file join [file dirname [file dirname $exe]] lib] \
|
| 327 |
+
]
|
| 328 |
+
|
| 329 |
+
if {$tcl_platform(platform) eq "windows"} {
|
| 330 |
+
set sep ";"
|
| 331 |
+
} else {
|
| 332 |
+
set sep ":"
|
| 333 |
+
}
|
| 334 |
+
for {set n $minor} {$n >= 0} {incr n -1} {
|
| 335 |
+
foreach ev [::list \
|
| 336 |
+
TCL${major}.${n}_TM_PATH \
|
| 337 |
+
TCL${major}_${n}_TM_PATH \
|
| 338 |
+
] {
|
| 339 |
+
if {![info exists env($ev)]} continue
|
| 340 |
+
foreach p [split $env($ev) $sep] {
|
| 341 |
+
path add $p
|
| 342 |
+
}
|
| 343 |
+
}
|
| 344 |
+
}
|
| 345 |
+
return
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
# ::tcl::tm::roots --
|
| 349 |
+
#
|
| 350 |
+
# Public API to the module path. See specification.
|
| 351 |
+
#
|
| 352 |
+
# Arguments
|
| 353 |
+
# paths - List of 'root' paths to derive search paths from.
|
| 354 |
+
#
|
| 355 |
+
# Results
|
| 356 |
+
# No result.
|
| 357 |
+
#
|
| 358 |
+
# Side effects
|
| 359 |
+
# Calls 'path add' to paths to the list of module search paths.
|
| 360 |
+
|
| 361 |
+
proc ::tcl::tm::roots {paths} {
|
| 362 |
+
regexp {^(\d+)\.(\d+)} [package provide Tcl] - major minor
|
| 363 |
+
foreach pa $paths {
|
| 364 |
+
set p [file join $pa tcl$major]
|
| 365 |
+
for {set n $minor} {$n >= 0} {incr n -1} {
|
| 366 |
+
set px [file join $p ${major}.${n}]
|
| 367 |
+
if {![interp issafe]} {set px [file normalize $px]}
|
| 368 |
+
path add $px
|
| 369 |
+
}
|
| 370 |
+
set px [file join $p site-tcl]
|
| 371 |
+
if {![interp issafe]} {set px [file normalize $px]}
|
| 372 |
+
path add $px
|
| 373 |
+
}
|
| 374 |
+
return
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
# Initialization. Set up the default paths, then insert the new handler into
|
| 378 |
+
# the chain.
|
| 379 |
+
|
| 380 |
+
if {![interp issafe]} {::tcl::tm::Defaults}
|
parrot/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e3dc594d5d6bfc95f658b6b65d274f873ce0eded4c018145fe10804b39f09153
|
| 3 |
+
size 406628
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/CompileTimeFunctionPointer.h>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
namespace impl {
|
| 7 |
+
namespace detail {
|
| 8 |
+
template<class FuncPtr, class ReturnType, class ParameterList> class WrapFunctionIntoFunctor_ {};
|
| 9 |
+
template<class FuncPtr, class ReturnType, class... Parameters>
|
| 10 |
+
class WrapFunctionIntoFunctor_<FuncPtr, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
|
| 11 |
+
public:
|
| 12 |
+
C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) {
|
| 13 |
+
return (*FuncPtr::func_ptr())(std::forward<Parameters>(args)...);
|
| 14 |
+
}
|
| 15 |
+
};
|
| 16 |
+
}
|
| 17 |
+
|
| 18 |
+
// WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor.
|
| 19 |
+
// Since it is a compile time function pointer, many compilers can inline it
|
| 20 |
+
// into the wrapper and you don't get any performance overhead for wrapping.
|
| 21 |
+
template<class FuncPtr>
|
| 22 |
+
struct WrapFunctionIntoFunctor final {
|
| 23 |
+
static_assert(c10::is_compile_time_function_pointer<FuncPtr>::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN.");
|
| 24 |
+
using type = detail::WrapFunctionIntoFunctor_<
|
| 25 |
+
FuncPtr,
|
| 26 |
+
typename guts::function_traits<typename FuncPtr::FuncType>::return_type,
|
| 27 |
+
typename guts::function_traits<typename FuncPtr::FuncType>::parameter_types
|
| 28 |
+
>;
|
| 29 |
+
};
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/util/TypeTraits.h>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
namespace impl {
|
| 8 |
+
namespace detail {
|
| 9 |
+
template<class FuncType, class ReturnType, class ParameterList> class WrapFunctionIntoRuntimeFunctor_ {};
|
| 10 |
+
template<class FuncType, class ReturnType, class... Parameters>
|
| 11 |
+
class WrapFunctionIntoRuntimeFunctor_<FuncType, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
|
| 12 |
+
public:
|
| 13 |
+
template<class FuncType_>
|
| 14 |
+
explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func)
|
| 15 |
+
: kernel_func_(std::forward<FuncType_>(kernel_func)) {}
|
| 16 |
+
|
| 17 |
+
decltype(auto) operator()(Parameters... args) {
|
| 18 |
+
return kernel_func_(std::forward<Parameters>(args)...);
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
private:
|
| 22 |
+
FuncType kernel_func_;
|
| 23 |
+
};
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
// WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that
|
| 27 |
+
// inherits from c10::OperatorKernel, so it can be used as a c10 kernel.
|
| 28 |
+
// This can, for example, be used for lambdas, functors or even function pointers.
|
| 29 |
+
// In the case of function pointers, since it is a runtime function pointer,
|
| 30 |
+
// there is an overhead for calling it whenever the kernel is invoked.
|
| 31 |
+
template<class FuncType>
|
| 32 |
+
using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_<
|
| 33 |
+
FuncType,
|
| 34 |
+
typename guts::infer_function_traits_t<FuncType>::return_type,
|
| 35 |
+
typename guts::infer_function_traits_t<FuncType>::parameter_types
|
| 36 |
+
>;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h
ADDED
|
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// This file contains boxing (not unboxing) logic,
|
| 4 |
+
// i.e. how to make a vector<IValue> from a set of concrete arguments.
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/ivalue.h>
|
| 7 |
+
#include <ATen/core/stack.h>
|
| 8 |
+
#include <c10/core/TensorOptions.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/core/boxing/BoxedKernel.h>
|
| 11 |
+
|
| 12 |
+
#include <c10/util/Metaprogramming.h>
|
| 13 |
+
#include <type_traits>
|
| 14 |
+
|
| 15 |
+
namespace c10 {
|
| 16 |
+
namespace impl {
|
| 17 |
+
|
| 18 |
+
//
|
| 19 |
+
// utils
|
| 20 |
+
//
|
| 21 |
+
|
| 22 |
+
// is_mutable_tensor_ref
|
| 23 |
+
template <class T> struct is_mutable_tensor_ref : std::false_type {};
|
| 24 |
+
template <> struct is_mutable_tensor_ref<at::Tensor&> : std::true_type {};
|
| 25 |
+
|
| 26 |
+
// is_tuple_of_mutable_tensor_refs
|
| 27 |
+
//
|
| 28 |
+
template <class T, class Enable = void>
|
| 29 |
+
struct is_tuple_of_mutable_tensor_refs : std::false_type {};
|
| 30 |
+
|
| 31 |
+
template <class T>
|
| 32 |
+
struct is_tuple_of_mutable_tensor_refs<T, std::enable_if_t<guts::is_instantiation_of<std::tuple, T>::value, void>>
|
| 33 |
+
: guts::typelist::all<is_mutable_tensor_ref, guts::typelist::from_tuple_t<T>>
|
| 34 |
+
{};
|
| 35 |
+
|
| 36 |
+
// has_ivalue_to<T> tests the presence/absence of instance method IValue::to<T>()
|
| 37 |
+
//
|
| 38 |
+
template <class T, class Enable = void>
|
| 39 |
+
struct has_ivalue_to : std::false_type {};
|
| 40 |
+
|
| 41 |
+
template <class T>
|
| 42 |
+
struct ivalue_to_helper
|
| 43 |
+
{
|
| 44 |
+
using type = decltype(std::declval<IValue>().template to<T>());
|
| 45 |
+
};
|
| 46 |
+
template <class T>
|
| 47 |
+
using ivalue_to_helper_t = typename ivalue_to_helper<T>::type;
|
| 48 |
+
|
| 49 |
+
template <class T>
|
| 50 |
+
struct has_ivalue_to<T, std::void_t<ivalue_to_helper_t<T>>>
|
| 51 |
+
: std::true_type
|
| 52 |
+
{};
|
| 53 |
+
|
| 54 |
+
//
|
| 55 |
+
// boxing predicates
|
| 56 |
+
//
|
| 57 |
+
|
| 58 |
+
// A boxable arg type is one that IValue has a constructor for.
|
| 59 |
+
template <typename T>
|
| 60 |
+
using can_box =
|
| 61 |
+
std::disjunction<
|
| 62 |
+
std::is_constructible<IValue, std::decay_t<T>>,
|
| 63 |
+
// TensorOptions are not directly constructible into IValue,
|
| 64 |
+
// but torch::jit::push knows how to handle them
|
| 65 |
+
std::is_same<TensorOptions, std::decay_t<T>>
|
| 66 |
+
>;
|
| 67 |
+
|
| 68 |
+
template <typename... Ts>
|
| 69 |
+
using can_box_all = std::conjunction<can_box<Ts>...>;
|
| 70 |
+
|
| 71 |
+
// an unboxable result is one that can be extracted from an IValue
|
| 72 |
+
template <typename T>
|
| 73 |
+
using can_unbox =
|
| 74 |
+
std::conjunction<
|
| 75 |
+
std::disjunction<
|
| 76 |
+
has_ivalue_to<T>,
|
| 77 |
+
// void returns are ok
|
| 78 |
+
std::is_same<void, T>
|
| 79 |
+
>,
|
| 80 |
+
std::negation<std::is_lvalue_reference<T>>
|
| 81 |
+
>;
|
| 82 |
+
|
| 83 |
+
//
|
| 84 |
+
// boxArgs - utility for pushing unboxed args onto IValue stack
|
| 85 |
+
//
|
| 86 |
+
template <class... Args>
|
| 87 |
+
torch::jit::Stack boxArgs(Args... args) {
|
| 88 |
+
// TODO Reuse stack vector instead of allocating?
|
| 89 |
+
torch::jit::Stack stack;
|
| 90 |
+
stack.reserve(sizeof...(Args));
|
| 91 |
+
torch::jit::push(stack, std::forward<Args>(args)...);
|
| 92 |
+
return stack;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
template <class T>
|
| 96 |
+
static inline constexpr size_t boxed_size_one() {
|
| 97 |
+
static_assert(!std::is_same<std::decay_t<T>, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference");
|
| 98 |
+
return 1;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
// torch::jit::push pushes 4 values for a TensorOptions; this needs to
|
| 102 |
+
// be kept in sync.
|
| 103 |
+
template <>
|
| 104 |
+
inline constexpr size_t boxed_size_one<c10::TensorOptions>() {
|
| 105 |
+
return 4;
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
// NOTE: this could probably be simplified with C++17 fold expressions.
|
| 109 |
+
template <typename...>
|
| 110 |
+
struct BoxedSize : std::integral_constant<size_t, 0> {};
|
| 111 |
+
template <class T, class... Args>
|
| 112 |
+
struct BoxedSize<T, Args...> : std::integral_constant<size_t, boxed_size_one<T>() + BoxedSize<Args...>::value> {};
|
| 113 |
+
|
| 114 |
+
template <class... Args>
|
| 115 |
+
static inline constexpr size_t boxed_size() {
|
| 116 |
+
return BoxedSize<Args...>::value;
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
using IValueAlignedStorage = std::aligned_storage_t<sizeof(IValue), alignof(IValue)>;
|
| 120 |
+
|
| 121 |
+
template <typename T>
|
| 122 |
+
C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) {
|
| 123 |
+
new (&dest[lastIdx]) IValue(arg);
|
| 124 |
+
lastIdx++;
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) {
|
| 128 |
+
new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype()));
|
| 129 |
+
new (&dest[lastIdx++]) IValue(options.layout());
|
| 130 |
+
new (&dest[lastIdx++]) IValue(options.device());
|
| 131 |
+
new (&dest[lastIdx++]) IValue(options.pinned_memory());
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
inline void boxArgsToStack(IValueAlignedStorage*, int&) {}
|
| 135 |
+
|
| 136 |
+
template<typename T, typename... Args>
|
| 137 |
+
C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) {
|
| 138 |
+
boxToStack(dest, arg, lastIdx);
|
| 139 |
+
boxArgsToStack(dest, lastIdx, args...);
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
//
|
| 143 |
+
// PopResult is a helper class whose specializations handle popping single and
|
| 144 |
+
// multiple return values, respectively.
|
| 145 |
+
//
|
| 146 |
+
template <class Result>
|
| 147 |
+
struct PopResult final {
|
| 148 |
+
static Result call(Stack& stack) {
|
| 149 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 150 |
+
stack.size() == 1,
|
| 151 |
+
"Boxed kernel was expected to return one value on the stack, ",
|
| 152 |
+
"but instead pushed ", stack.size(), " values."
|
| 153 |
+
);
|
| 154 |
+
return std::move(stack[0]).to<Result>();
|
| 155 |
+
}
|
| 156 |
+
};
|
| 157 |
+
|
| 158 |
+
template <class... Types>
|
| 159 |
+
struct PopResult<std::tuple<Types...>> final {
|
| 160 |
+
using Result = std::tuple<Types...>;
|
| 161 |
+
|
| 162 |
+
static Result call(Stack& stack) {
|
| 163 |
+
// for tuple return types, boxed kernel has pushed multiple values onto the stack
|
| 164 |
+
constexpr int RetCount = sizeof...(Types);
|
| 165 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 166 |
+
stack.size() == RetCount,
|
| 167 |
+
"Boxed kernel was expected to return ", RetCount, " values on the stack, ",
|
| 168 |
+
"but instead pushed ", stack.size(), " values."
|
| 169 |
+
);
|
| 170 |
+
return pop_to_tuple_impl(stack, std::make_index_sequence<RetCount>());
|
| 171 |
+
}
|
| 172 |
+
private:
|
| 173 |
+
// note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise.
|
| 174 |
+
// I'm sure there's an incantation that slips it past the parser but eh
|
| 175 |
+
template <size_t... indices>
|
| 176 |
+
static Result pop_to_tuple_impl(Stack& stack, std::index_sequence<indices...>) {
|
| 177 |
+
return std::make_tuple((std::move(stack[indices]).to<Types>())...);
|
| 178 |
+
}
|
| 179 |
+
};
|
| 180 |
+
|
| 181 |
+
//
|
| 182 |
+
// BoxedKernelWrapper
|
| 183 |
+
//
|
| 184 |
+
// For a given function type FT, BoxedKernelWrapper<FT> implements
|
| 185 |
+
// a `call` method that
|
| 186 |
+
// - takes a boxed kernel and unboxed arguments as specified by FT,
|
| 187 |
+
// - calls `boxArgs` to box the arguments
|
| 188 |
+
// - calls the boxed kernel
|
| 189 |
+
// - unboxes and returns the result
|
| 190 |
+
//
|
| 191 |
+
// The partial specializations below handle various cases: in
|
| 192 |
+
// particular, not all types appearing in op signatures are supported,
|
| 193 |
+
// and ops returning references have nonstandard wrapper implementations.
|
| 194 |
+
//
|
| 195 |
+
|
| 196 |
+
// 1. The base specialization of BoxedKernelWrapper should never be instantiated.
|
| 197 |
+
// A "no call method defined on BoxedKernelWrapper" compile error means that
|
| 198 |
+
// an op signature has failed to trigger any of the partial specializations
|
| 199 |
+
// that follow this one.
|
| 200 |
+
//
|
| 201 |
+
template <class FuncType, class Enable = void>
|
| 202 |
+
struct BoxedKernelWrapper {
|
| 203 |
+
// The reason we're not just doing straight up static_assert(false, ...) here:
|
| 204 |
+
// Basically, the way to make sure a static_assert only fires if a template
|
| 205 |
+
// is actually instantiated (rather than every time the file is parsed) is to use
|
| 206 |
+
// template parameters in the expression, e.g. FuncType here. However, since
|
| 207 |
+
// `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same
|
| 208 |
+
// effect.
|
| 209 |
+
static_assert(sizeof(FuncType) != sizeof(FuncType),
|
| 210 |
+
"Function signature contains one or more unsupported parameter and/or return types. "
|
| 211 |
+
"Look for a nearby error like "
|
| 212 |
+
"\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" "
|
| 213 |
+
"- (your function type) is the unsupported signature.");
|
| 214 |
+
};
|
| 215 |
+
|
| 216 |
+
//
|
| 217 |
+
// 2. Supported signatures, other than those involving non-const Tensor refs -
|
| 218 |
+
// i.e., "functional" ops.
|
| 219 |
+
//
|
| 220 |
+
|
| 221 |
+
template <class Result, class... Args>
|
| 222 |
+
struct BoxedKernelWrapper<
|
| 223 |
+
Result(Args...),
|
| 224 |
+
std::enable_if_t<
|
| 225 |
+
can_box_all<Args...>::value && can_unbox<Result>::value && !is_tuple_of_mutable_tensor_refs<Result>::value,
|
| 226 |
+
void
|
| 227 |
+
>
|
| 228 |
+
> {
|
| 229 |
+
static Result call(
|
| 230 |
+
const BoxedKernel& boxed_kernel_func,
|
| 231 |
+
const OperatorHandle& opHandle,
|
| 232 |
+
DispatchKeySet dispatchKeySet,
|
| 233 |
+
Args... args
|
| 234 |
+
) {
|
| 235 |
+
torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
|
| 236 |
+
boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
|
| 237 |
+
|
| 238 |
+
if constexpr (!std::is_same_v<void, Result>) {
|
| 239 |
+
// op has pushed one or more values onto the stack.
|
| 240 |
+
return PopResult<Result>::call(stack);
|
| 241 |
+
} else {
|
| 242 |
+
// op returns void, boxed kernel has pushed nothing onto stack.
|
| 243 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 244 |
+
stack.empty(),
|
| 245 |
+
"Boxed kernel was expected to return no values on the stack, ",
|
| 246 |
+
"but instead returned ", stack.size(), " values."
|
| 247 |
+
);
|
| 248 |
+
}
|
| 249 |
+
}
|
| 250 |
+
};
|
| 251 |
+
|
| 252 |
+
//
|
| 253 |
+
// 3. in-place ops take a single non-const Tensor reference
|
| 254 |
+
// as their first argument, and return it.
|
| 255 |
+
//
|
| 256 |
+
// Note: all signatures matching this pattern are assumed to be for such ops.
|
| 257 |
+
// Because of this, the generated BoxedKernelWrapper specializations simply
|
| 258 |
+
// return the in-place argument.
|
| 259 |
+
//
|
| 260 |
+
|
| 261 |
+
template <class... OtherArgs>
|
| 262 |
+
struct BoxedKernelWrapper<
|
| 263 |
+
at::Tensor&(at::Tensor&, OtherArgs...),
|
| 264 |
+
std::enable_if_t<can_box_all<OtherArgs...>::value, void>
|
| 265 |
+
> {
|
| 266 |
+
static at::Tensor& call(
|
| 267 |
+
const BoxedKernel& boxed_kernel_func,
|
| 268 |
+
const OperatorHandle& opHandle,
|
| 269 |
+
DispatchKeySet dispatchKeySet,
|
| 270 |
+
at::Tensor& outArg, OtherArgs... otherArgs
|
| 271 |
+
) {
|
| 272 |
+
torch::jit::Stack stack = boxArgs<at::Tensor&, OtherArgs...>(outArg, std::forward<OtherArgs>(otherArgs)...);
|
| 273 |
+
boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
|
| 274 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 275 |
+
stack.size() == 1,
|
| 276 |
+
"Boxed kernel was expected to return a single value on the stack, ",
|
| 277 |
+
"but instead returned ", stack.size(), " values."
|
| 278 |
+
);
|
| 279 |
+
|
| 280 |
+
return outArg;
|
| 281 |
+
}
|
| 282 |
+
};
|
| 283 |
+
|
| 284 |
+
//
|
| 285 |
+
// 3.5. In-process migration to make in-place ops take and return
|
| 286 |
+
// const references instead.
|
| 287 |
+
template <class... OtherArgs>
|
| 288 |
+
struct BoxedKernelWrapper<
|
| 289 |
+
const at::Tensor&(const at::Tensor&, OtherArgs...),
|
| 290 |
+
std::enable_if_t<can_box_all<OtherArgs...>::value, void>
|
| 291 |
+
> {
|
| 292 |
+
static const at::Tensor& call(
|
| 293 |
+
const BoxedKernel& boxed_kernel_func,
|
| 294 |
+
const OperatorHandle& opHandle,
|
| 295 |
+
DispatchKeySet dispatchKeySet,
|
| 296 |
+
const at::Tensor& outArg, OtherArgs... otherArgs
|
| 297 |
+
) {
|
| 298 |
+
torch::jit::Stack stack = boxArgs(outArg, otherArgs...);
|
| 299 |
+
boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
|
| 300 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 301 |
+
stack.size() == 1,
|
| 302 |
+
"Boxed kernel was expected to return a single value on the stack, ",
|
| 303 |
+
"but instead returned ", stack.size(), " values."
|
| 304 |
+
);
|
| 305 |
+
|
| 306 |
+
return outArg;
|
| 307 |
+
}
|
| 308 |
+
};
|
| 309 |
+
|
| 310 |
+
//
|
| 311 |
+
// 4. out of place ops that take a single non-const Tensor reference as their
|
| 312 |
+
// final argument, and also return it.
|
| 313 |
+
//
|
| 314 |
+
// Note: all signatures matching this pattern are assumed to be for such ops.
|
| 315 |
+
// This assumption permits the generated BoxedKernelWrapper specializations to simply
|
| 316 |
+
// return out arguments.
|
| 317 |
+
//
|
| 318 |
+
template <class FirstArg, class... RestArgs>
|
| 319 |
+
struct BoxedKernelWrapper<
|
| 320 |
+
at::Tensor&(FirstArg, RestArgs...),
|
| 321 |
+
std::enable_if_t<
|
| 322 |
+
can_box_all<FirstArg, RestArgs...>::value
|
| 323 |
+
// this skips over in-place kernels with a non-const Tensor
|
| 324 |
+
// arg at the front, so those can unambiguously trigger the preceding specialization.
|
| 325 |
+
&& !is_mutable_tensor_ref<FirstArg>::value,
|
| 326 |
+
void
|
| 327 |
+
>
|
| 328 |
+
> {
|
| 329 |
+
static at::Tensor& call(
|
| 330 |
+
const BoxedKernel& boxed_kernel_func,
|
| 331 |
+
const OperatorHandle& opHandle,
|
| 332 |
+
DispatchKeySet dispatchKeySet,
|
| 333 |
+
FirstArg firstArg, RestArgs... restArgs
|
| 334 |
+
) {
|
| 335 |
+
torch::jit::Stack stack = boxArgs<FirstArg, RestArgs...>(std::forward<FirstArg>(firstArg), std::forward<RestArgs>(restArgs)...);
|
| 336 |
+
boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
|
| 337 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 338 |
+
stack.size() == 1,
|
| 339 |
+
"Boxed kernel was expected to return a single value on the stack, ",
|
| 340 |
+
"but instead returned ", stack.size(), " values."
|
| 341 |
+
);
|
| 342 |
+
|
| 343 |
+
// reusing restArgs after it has been forwarded here is ok because we know
|
| 344 |
+
// that the last element is of type `Tensor&`.
|
| 345 |
+
return std::get<sizeof...(RestArgs) - 1>(std::tuple<RestArgs...>{restArgs...});
|
| 346 |
+
}
|
| 347 |
+
};
|
| 348 |
+
|
| 349 |
+
//
|
| 350 |
+
// 5. out of place ops that take multiple non-const Tensor references as their
|
| 351 |
+
// final arguments, and return them in a std::tuple.
|
| 352 |
+
//
|
| 353 |
+
// Note: all signatures matching this pattern are assumed to be for such ops.
|
| 354 |
+
// This assumption permits the generated BoxedKernelWrapper specializations to simply
|
| 355 |
+
// return the out arguments.
|
| 356 |
+
//
|
| 357 |
+
template <class Result, class... Args>
|
| 358 |
+
struct BoxedKernelWrapper<
|
| 359 |
+
Result(Args...),
|
| 360 |
+
std::enable_if_t<
|
| 361 |
+
can_box_all<Args...>::value && is_tuple_of_mutable_tensor_refs<Result>::value,
|
| 362 |
+
void
|
| 363 |
+
>
|
| 364 |
+
> {
|
| 365 |
+
static Result call(
|
| 366 |
+
const BoxedKernel& boxed_kernel_func,
|
| 367 |
+
const OperatorHandle& opHandle,
|
| 368 |
+
DispatchKeySet dispatchKeySet,
|
| 369 |
+
Args... args
|
| 370 |
+
) {
|
| 371 |
+
using ArgTuple = std::tuple<Args...>;
|
| 372 |
+
constexpr int RetCount = std::tuple_size<Result>();
|
| 373 |
+
|
| 374 |
+
torch::jit::Stack stack = boxArgs<Args...>(std::forward<Args>(args)...);
|
| 375 |
+
boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack);
|
| 376 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
|
| 377 |
+
stack.size() == RetCount,
|
| 378 |
+
"Boxed kernel was expected to return ", RetCount, " values on the stack, ",
|
| 379 |
+
"but instead returned ", stack.size(), " values."
|
| 380 |
+
);
|
| 381 |
+
|
| 382 |
+
// reusing args after it has been forwarded here is ok because we know
|
| 383 |
+
// that the last RetCount elements are of type `Tensor&`.
|
| 384 |
+
auto result = guts::tuple_take<ArgTuple, -RetCount>(ArgTuple{std::forward<Args>(args)...});
|
| 385 |
+
static_assert(
|
| 386 |
+
std::is_same<Result, decltype(result)>::value,
|
| 387 |
+
"The parameter list of an op returning a tuple of Tensor references "
|
| 388 |
+
"must end with an equal number of Tensor reference parameters."
|
| 389 |
+
);
|
| 390 |
+
return result;
|
| 391 |
+
}
|
| 392 |
+
};
|
| 393 |
+
|
| 394 |
+
} // impl
|
| 395 |
+
} // c10
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <gtest/gtest.h>
|
| 4 |
+
#include <gmock/gmock.h>
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/Tensor.h>
|
| 7 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 8 |
+
#include <ATen/core/ivalue.h>
|
| 9 |
+
#include <c10/core/CPUAllocator.h>
|
| 10 |
+
#include <c10/util/irange.h>
|
| 11 |
+
|
| 12 |
+
template<class... Inputs>
|
| 13 |
+
inline std::vector<c10::IValue> makeStack(Inputs&&... inputs) {
|
| 14 |
+
return {std::forward<Inputs>(inputs)...};
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) {
|
| 18 |
+
auto* allocator = c10::GetCPUAllocator();
|
| 19 |
+
int64_t nelements = 1;
|
| 20 |
+
auto dtype = caffe2::TypeMeta::Make<float>();
|
| 21 |
+
int64_t size_bytes = nelements * dtype.itemsize();
|
| 22 |
+
auto storage_impl = c10::make_intrusive<c10::StorageImpl>(
|
| 23 |
+
c10::StorageImpl::use_byte_size_t(),
|
| 24 |
+
size_bytes,
|
| 25 |
+
allocator->allocate(size_bytes),
|
| 26 |
+
allocator,
|
| 27 |
+
/*resizable=*/true);
|
| 28 |
+
at::Tensor t = at::detail::make_tensor<c10::TensorImpl>(storage_impl, ks, dtype);
|
| 29 |
+
// TODO: We add this to simulate the ideal case where we only have Autograd backend keys
|
| 30 |
+
// on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl
|
| 31 |
+
// constructor by default.
|
| 32 |
+
if (!requires_grad) {
|
| 33 |
+
t.unsafeGetTensorImpl()->remove_autograd_key();
|
| 34 |
+
}
|
| 35 |
+
return t;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) {
|
| 39 |
+
return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad);
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
template<class... Args>
|
| 43 |
+
inline std::vector<c10::IValue> callOp(const c10::OperatorHandle& op, Args... args) {
|
| 44 |
+
auto stack = makeStack(std::forward<Args>(args)...);
|
| 45 |
+
op.callBoxed(&stack);
|
| 46 |
+
return stack;
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
template<class Result, class... Args>
|
| 50 |
+
inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) {
|
| 51 |
+
return op.typed<Result(Args...)>().call(std::forward<Args>(args)...);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
template<class Result, class... Args>
|
| 55 |
+
inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) {
|
| 56 |
+
return op.typed<Result(Args...)>().callWithDispatchKey(dispatchKey, std::forward<Args>(args)...);
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
template<class Result, class... Args>
|
| 60 |
+
inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... args) {
|
| 61 |
+
return op.typed<Result(Args...)>().redispatch(ks, std::forward<Args>(args)...);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) {
|
| 65 |
+
auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
|
| 66 |
+
EXPECT_ANY_THROW(
|
| 67 |
+
callOp(*op, dummyTensor(dispatch_key), 5);
|
| 68 |
+
);
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
inline void expectDoesntFindOperator(const char* op_name) {
|
| 72 |
+
auto op = c10::Dispatcher::singleton().findSchema({op_name, ""});
|
| 73 |
+
EXPECT_FALSE(op.has_value());
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
template<class Exception, class Functor>
|
| 77 |
+
inline void expectThrows(Functor&& functor, const char* expectMessageContains) {
|
| 78 |
+
try {
|
| 79 |
+
std::forward<Functor>(functor)();
|
| 80 |
+
} catch (const Exception& e) {
|
| 81 |
+
EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains));
|
| 82 |
+
return;
|
| 83 |
+
}
|
| 84 |
+
ADD_FAILURE() << "Expected to throw exception containing \""
|
| 85 |
+
<< expectMessageContains << "\" but didn't throw";
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
template<class T, size_t N>
|
| 89 |
+
void expectListEquals(c10::ArrayRef<T> expected, std::array<T, N> actual) {
|
| 90 |
+
EXPECT_EQ(expected.size(), actual.size());
|
| 91 |
+
for (const auto i : c10::irange(expected.size())) {
|
| 92 |
+
EXPECT_EQ(expected[i], actual[i]);
|
| 93 |
+
}
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
template<class T>
|
| 97 |
+
void expectListEquals(c10::ArrayRef<T> expected, c10::ArrayRef<T> actual) {
|
| 98 |
+
EXPECT_EQ(expected.size(), actual.size());
|
| 99 |
+
for (const auto i : c10::irange(expected.size())) {
|
| 100 |
+
EXPECT_EQ(expected[i], actual[i]);
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
template<class T>
|
| 105 |
+
void expectListEquals(c10::ArrayRef<T> expected, c10::List<T> actual) {
|
| 106 |
+
EXPECT_EQ(expected.size(), actual.size());
|
| 107 |
+
for (const auto i : c10::irange(expected.size())) {
|
| 108 |
+
EXPECT_EQ(expected[i], actual.get(i));
|
| 109 |
+
}
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
template<class T>
|
| 113 |
+
void expectListEquals(c10::ArrayRef<T> expected, std::vector<T> actual) {
|
| 114 |
+
EXPECT_EQ(expected.size(), actual.size());
|
| 115 |
+
for (const auto i : c10::irange(expected.size())) {
|
| 116 |
+
EXPECT_EQ(expected[i], actual[i]);
|
| 117 |
+
}
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
// NB: This is not really sound, but all of the type sets constructed here
|
| 121 |
+
// are singletons so it's fine
|
| 122 |
+
static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) {
|
| 123 |
+
return legacyExtractDispatchKey(t.key_set());
|
| 124 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <typeindex>
|
| 4 |
+
#include <c10/core/DispatchKeySet.h>
|
| 5 |
+
#include <c10/macros/Macros.h>
|
| 6 |
+
#include <c10/util/Metaprogramming.h>
|
| 7 |
+
#include <c10/util/Type.h>
|
| 8 |
+
|
| 9 |
+
namespace c10 {
|
| 10 |
+
namespace impl {
|
| 11 |
+
|
| 12 |
+
// A CppSignature object holds RTTI information about a C++ function signature at runtime
|
| 13 |
+
// and can compare them or get a debug-printable name.
|
| 14 |
+
class TORCH_API CppSignature final {
|
| 15 |
+
public:
|
| 16 |
+
CppSignature(const CppSignature&) = default;
|
| 17 |
+
CppSignature(CppSignature&&) noexcept = default;
|
| 18 |
+
CppSignature& operator=(const CppSignature&) = default;
|
| 19 |
+
CppSignature& operator=(CppSignature&&) noexcept = default;
|
| 20 |
+
|
| 21 |
+
template<class FuncType>
|
| 22 |
+
static CppSignature make() {
|
| 23 |
+
// Normalize functors, lambdas, function pointers, etc. into the plain function type
|
| 24 |
+
// The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
|
| 25 |
+
// We do this to guarantee that all CppSignature's for an operator will match, even if they're registered
|
| 26 |
+
// with different calling conventions.
|
| 27 |
+
// See Note [Plumbing Keys Through The Dispatcher]
|
| 28 |
+
using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func<std::decay_t<FuncType>>::func_type;
|
| 29 |
+
|
| 30 |
+
return CppSignature(std::type_index(typeid(decayed_function_type)));
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
std::string name() const {
|
| 34 |
+
return c10::demangle(signature_.name());
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) {
|
| 38 |
+
if (lhs.signature_ == rhs.signature_) {
|
| 39 |
+
return true;
|
| 40 |
+
}
|
| 41 |
+
// Without RTLD_GLOBAL, the type_index comparison could yield false because
|
| 42 |
+
// they point to different instances of the RTTI data, but the types would
|
| 43 |
+
// still be the same. Let's check for that case too.
|
| 44 |
+
// Note that there still is a case where this might not work, i.e. when
|
| 45 |
+
// linking libraries of different compilers together, they might have
|
| 46 |
+
// different ways to serialize a type name. That, together with a missing
|
| 47 |
+
// RTLD_GLOBAL, would still fail this.
|
| 48 |
+
if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) {
|
| 49 |
+
return true;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
return false;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
private:
|
| 56 |
+
explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {}
|
| 57 |
+
std::type_index signature_;
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) {
|
| 61 |
+
return !(lhs == rhs );
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
}
|
| 65 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
#include <ATen/core/function_schema.h>
|
| 5 |
+
#include <ATen/core/jit_type.h>
|
| 6 |
+
#include <c10/util/Bitset.h>
|
| 7 |
+
#include <c10/core/DispatchKeySet.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
#include <ATen/core/Variadic.h>
|
| 10 |
+
#include <ATen/core/stack.h>
|
| 11 |
+
|
| 12 |
+
namespace c10 {
|
| 13 |
+
|
| 14 |
+
namespace impl {
|
| 15 |
+
|
| 16 |
+
// Take a DispatchKeySet for a Tensor and determine what the actual dispatch
|
| 17 |
+
// DispatchKey should be, taking into account TLS, and skipping backends which
|
| 18 |
+
// fall through.
|
| 19 |
+
//
|
| 20 |
+
// Unlike Tensor::key_set(), the value of this on a tensor can change depending
|
| 21 |
+
// on TLS.
|
| 22 |
+
//
|
| 23 |
+
// NB: If there is no valid dispatch key, this will return Undefined
|
| 24 |
+
inline DispatchKeySet computeDispatchKeySet(
|
| 25 |
+
DispatchKeySet ks,
|
| 26 |
+
// The key mask lets us eliminate (by zero entries) keys which should not
|
| 27 |
+
// be considered for dispatch. There are two cases when we use this:
|
| 28 |
+
//
|
| 29 |
+
// - If an operator's dispatch table contains a fallthrough entry, we
|
| 30 |
+
// should bypass it entirely when finding the key
|
| 31 |
+
// - If a user invokes with redispatch, the mask lets us
|
| 32 |
+
// zero out the key the user asked us to stop.
|
| 33 |
+
//
|
| 34 |
+
// These excluded backends are NOT tracked in the TLS, but must be applied
|
| 35 |
+
// AFTER TLS (since the backend may have been introduced for consideration
|
| 36 |
+
// by the included TLS), which is why you have to pass them in to this
|
| 37 |
+
// function (as opposed to just applying it to the input 'ks').
|
| 38 |
+
DispatchKeySet key_mask
|
| 39 |
+
) {
|
| 40 |
+
c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set();
|
| 41 |
+
// TODO: It's a bit irritating that we have to do logical ORs here, it would
|
| 42 |
+
// be nice to only do one. Can always_included be folded into the TLS? Well,
|
| 43 |
+
// it's a bit troublesome, because fastpath TLS access requires the type of
|
| 44 |
+
// the TLS in question to be zero-initialized, so you don't actually win
|
| 45 |
+
// anyting in that case.
|
| 46 |
+
return (((ks | local.included_) - local.excluded_) & key_mask);
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
namespace detail {
|
| 52 |
+
// A small gadget to extract the DispatchKeySet from types which are known
|
| 53 |
+
// to have it. Used to extract dispatch keys from unboxed calls.
|
| 54 |
+
struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> {
|
| 55 |
+
DispatchKeySet ts;
|
| 56 |
+
void operator()(const at::Tensor& x) {
|
| 57 |
+
ts = ts | x.key_set();
|
| 58 |
+
}
|
| 59 |
+
void operator()(const std::optional<at::Tensor>& x) {
|
| 60 |
+
if (x.has_value()) {
|
| 61 |
+
ts = ts | x->key_set();
|
| 62 |
+
}
|
| 63 |
+
}
|
| 64 |
+
void operator()(at::ArrayRef<at::Tensor> xs) {
|
| 65 |
+
for (const auto& x : xs) {
|
| 66 |
+
ts = ts | x.key_set();
|
| 67 |
+
}
|
| 68 |
+
}
|
| 69 |
+
// Tensor?[] translates to this case.
|
| 70 |
+
void operator()(const c10::List<std::optional<at::Tensor>>& xs) {
|
| 71 |
+
for (std::optional<at::Tensor> x : xs) {
|
| 72 |
+
if (x.has_value()) {
|
| 73 |
+
ts = ts | x.value().key_set();
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
}
|
| 77 |
+
// Structured Tensor[] translates to this case
|
| 78 |
+
void operator()(const at::ITensorListRef& xs) {
|
| 79 |
+
for (const auto& x : xs) {
|
| 80 |
+
ts = ts | x.key_set();
|
| 81 |
+
}
|
| 82 |
+
}
|
| 83 |
+
[[noreturn]] void operator()(at::ArrayRef<std::optional<at::Tensor>>) {
|
| 84 |
+
// Just checking that the handling of Tensor?[] didn't change.
|
| 85 |
+
TORCH_INTERNAL_ASSERT(false);
|
| 86 |
+
}
|
| 87 |
+
void operator()(const at::Generator& gen) {
|
| 88 |
+
if (gen.defined()) {
|
| 89 |
+
ts = ts | gen.key_set();
|
| 90 |
+
}
|
| 91 |
+
}
|
| 92 |
+
void operator()(const std::optional<at::Generator>& gen) {
|
| 93 |
+
if (gen.has_value() && gen->defined()) {
|
| 94 |
+
ts = ts | gen->key_set();
|
| 95 |
+
}
|
| 96 |
+
}
|
| 97 |
+
template <typename T>
|
| 98 |
+
void operator()(const T&) {
|
| 99 |
+
// do nothing
|
| 100 |
+
}
|
| 101 |
+
};
|
| 102 |
+
|
| 103 |
+
// NB: take by const reference (Don't do universal forwarding here! You
|
| 104 |
+
// don't want to move into this function!)
|
| 105 |
+
template <typename... Args>
|
| 106 |
+
DispatchKeySet multi_dispatch_key_set(const Args&... args) {
|
| 107 |
+
return MultiDispatchKeySet().apply(args...).ts;
|
| 108 |
+
}
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
/**
|
| 112 |
+
* An instance of DispatchKeyExtractor knows how to get a dispatch key given
|
| 113 |
+
* a list of arguments for an operator call.
|
| 114 |
+
*
|
| 115 |
+
* The instance is specific for a certain operator as:
|
| 116 |
+
* - In boxed dispatch, different operators have different ways to extract
|
| 117 |
+
* the dispatch key (e.g. different numbers of arguments), and we precompute
|
| 118 |
+
* the stack locations we should look at; and
|
| 119 |
+
* - In all dispatch, some backends should be excluded from dispatch because
|
| 120 |
+
* they have been registered as fallthrough. The set of excluded backends
|
| 121 |
+
* varies from operator, as some operators may have overridden the
|
| 122 |
+
* fallthrough with custom behavior.
|
| 123 |
+
*
|
| 124 |
+
* Note - this should maintain identical impl to the py dispatcher key extraction logic
|
| 125 |
+
* at pytorch/torch/dispatcher.py
|
| 126 |
+
*/
|
| 127 |
+
struct TORCH_API DispatchKeyExtractor final {
public:
  // Builds an extractor whose set of dispatch-relevant argument positions
  // is precomputed from `schema`.
  static DispatchKeyExtractor make(const FunctionSchema& schema) {
    return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema));
  }

  // Builds an extractor with no dispatch arguments; registerSchema() must
  // be called before boxed extraction can find any tensors on the stack.
  static DispatchKeyExtractor makeUninitialized() {
    return DispatchKeyExtractor(c10::utils::bitset());
  }

  // Precomputes the dispatch-argument bitset for `schema`.  Asserts that
  // the extractor is currently uninitialized (bitset entirely unset).
  void registerSchema(const FunctionSchema& schema) {
    TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset());
    dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema);
  }
  // Resets the extractor to the uninitialized state.
  void deregisterSchema() {
    dispatch_arg_indices_reverse_ = c10::utils::bitset();
  }

  // Boxed-call key extraction: unions the key sets of every
  // Tensor / Tensor[] / Tensor?[] argument at the precomputed stack
  // positions, then removes keys this operator treats as fallthrough.
  DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const {
    DispatchKeySet ks;
    dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) {
      // reverse_arg_index counts from the top of the stack (last argument).
      const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1);
      if (C10_LIKELY(ivalue.isTensor())) {
        // NB: Take care not to introduce a refcount bump (there's
        // no safe toTensorRef method, alas)
        ks = ks | ivalue.unsafeToTensorImpl()->key_set();
      } else if (C10_UNLIKELY(ivalue.isTensorList())) {
        for (const at::Tensor& tensor : ivalue.toTensorList()) {
          ks = ks | tensor.key_set();
        }
      }
      // Tensor?[] translates to a c10::List<IValue> so we need to peek inside
      else if (C10_UNLIKELY(ivalue.isList())) {
        for (const auto& elt : ivalue.toListRef()) {
          if (elt.isTensor()) {
            ks = ks | elt.toTensor().key_set();
          }
        }
      }
    });
    // Keys that are fallthrough should be skipped
    if (requiresBitsetPerBackend_) {
      auto backend_idx = ks.getBackendIndex();
      return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
    } else {
      return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
    }
  }

  // Unboxed-call key extraction: visits `args` directly via
  // detail::multi_dispatch_key_set, then removes keys this operator
  // treats as fallthrough (mirrors the tail of getDispatchKeySetBoxed).
  template<class... Args>
  DispatchKeySet getDispatchKeySetUnboxed(const Args&... args) const {
    auto ks = detail::multi_dispatch_key_set(args...);
    // Keys that are fallthrough should be skipped
    if (requiresBitsetPerBackend_) {
      auto backend_idx = ks.getBackendIndex();
      return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]);
    } else {
      return impl::computeDispatchKeySet(ks, nonFallthroughKeys_);
    }
  }

  // Records fallthrough status for key `k`; affects the nonFallthrough*
  // masks consulted above.  Defined out of line.
  void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough);

  std::string dumpState() const;
  void checkInvariants(const FunctionSchema& schema) const;

private:
  // Returns a bitset with a bit set (in reverse argument order) for every
  // schema argument whose static type can carry dispatch keys:
  // Tensor, Tensor[], Tensor?[] or Tensor?.
  static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) {
    TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(),
        "The function schema has ", schema.arguments().size(),
        " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS());
    c10::utils::bitset dispatch_arg_indices_reverse;
    for (const auto index : c10::irange(schema.arguments().size())) {
      if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *ListType::ofTensors()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *ListType::ofOptionalTensors()) ||
          schema.arguments()[index].type()->isSubtypeOf(
              *OptionalType::ofTensor())) {
        dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index);
      }
    }
    return dispatch_arg_indices_reverse;
  }

  explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse)
  : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse)
  , nonFallthroughKeys_(DispatchKeySet::FULL)
  , requiresBitsetPerBackend_(false) {
    // Start from "no fallthroughs anywhere": every key is kept until
    // setOperatorHasFallthroughForKey says otherwise.
    for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) {
      nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL;
    }
  }

  // this is a bitset that has ones for each argument index which has to be
  // considered for dispatch. This avoids having to iterate over the stack
  // to find all the tensors. The bits are stored in reverse order, i.e.
  // dispatch_arg_indices_reverse_[i] == true, then the i-th argument from
  // the top of the stack (i.e. the i-th last argument of the function)
  // is relevant for dispatch.
  // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the
  // fallthrough
  c10::utils::bitset dispatch_arg_indices_reverse_;

  // Set of functionality keys for which the operator does NOT have fallthrough kernel.
  DispatchKeySet nonFallthroughKeys_;
  // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND.
  // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends.
  std::array<DispatchKeySet, num_backends> nonFallthroughKeysPerBackend_;
  // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path),
  // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_
  bool requiresBitsetPerBackend_;
};
|
| 241 |
+
|
| 242 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h
ADDED
|
@@ -0,0 +1,799 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/SequenceNumber.h>
|
| 4 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
| 5 |
+
#include <ATen/core/boxing/impl/boxing.h>
|
| 6 |
+
#include <ATen/core/dispatch/OperatorEntry.h>
|
| 7 |
+
#include <ATen/core/dispatch/CppSignature.h>
|
| 8 |
+
#include <ATen/core/dispatch/RegistrationHandleRAII.h>
|
| 9 |
+
#include <ATen/record_function.h>
|
| 10 |
+
#include <c10/util/Exception.h>
|
| 11 |
+
#include <c10/util/LeftRight.h>
|
| 12 |
+
#include <list>
|
| 13 |
+
#include <mutex>
|
| 14 |
+
#include <condition_variable>
|
| 15 |
+
#include <type_traits>
|
| 16 |
+
#include <c10/core/SafePyObject.h>
|
| 17 |
+
|
| 18 |
+
#include <ATen/core/grad_mode.h>
|
| 19 |
+
#include <ATen/core/enum_tag.h>
|
| 20 |
+
|
| 21 |
+
#ifndef NDEBUG
|
| 22 |
+
#include <iostream>
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
namespace c10 {
|
| 26 |
+
|
| 27 |
+
TORCH_API bool show_dispatch_trace();
|
| 28 |
+
TORCH_API void dispatch_trace_nesting_incr();
|
| 29 |
+
TORCH_API void dispatch_trace_nesting_decr();
|
| 30 |
+
TORCH_API int64_t dispatch_trace_nesting_value();
|
| 31 |
+
|
| 32 |
+
struct DispatchTraceNestingGuard {
|
| 33 |
+
DispatchTraceNestingGuard() { dispatch_trace_nesting_incr(); }
|
| 34 |
+
~DispatchTraceNestingGuard() { dispatch_trace_nesting_decr(); }
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
class TORCH_API OperatorHandle;
|
| 38 |
+
template<class FuncType> class TypedOperatorHandle;
|
| 39 |
+
|
| 40 |
+
/**
|
| 41 |
+
* Implement this interface and register your instance with the dispatcher
|
| 42 |
+
* to get notified when operators are registered or deregistered with
|
| 43 |
+
* the dispatcher.
|
| 44 |
+
*
|
| 45 |
+
* NB: registration events only occur when a 'def' occurs; we don't trigger
|
| 46 |
+
* on 'impl' or 'fallback' calls.
|
| 47 |
+
*/
|
| 48 |
+
// Listener interface for dispatcher registration events; see the comment
// above for when each callback fires.
class TORCH_API OpRegistrationListener {
public:
  virtual ~OpRegistrationListener();

  // Called when an operator is registered with the dispatcher.
  virtual void onOperatorRegistered(const OperatorHandle& op) = 0;
  // Called when an operator is deregistered from the dispatcher.
  virtual void onOperatorDeregistered(const OperatorHandle& op) = 0;
};
|
| 55 |
+
|
| 56 |
+
namespace detail {
|
| 57 |
+
class RegistrationListenerList;
|
| 58 |
+
}
|
| 59 |
+
class SchemaRegistrationHandleRAII;
|
| 60 |
+
|
| 61 |
+
/**
|
| 62 |
+
* Top-level dispatch interface for dispatching via the dynamic dispatcher.
|
| 63 |
+
* Most end users shouldn't use this directly; if you're trying to register
|
| 64 |
+
* ops look in op_registration
|
| 65 |
+
*/
|
| 66 |
+
class TORCH_API Dispatcher final {
private:
  // For direct access to backend fallback information
  friend class impl::OperatorEntry;

  // Per-operator record: the dispatch-table entry plus registration
  // refcounts used to decide when to fire listeners / free the entry.
  struct OperatorDef final {
    explicit OperatorDef(OperatorName&& op_name)
    : op(std::move(op_name)) {}

    impl::OperatorEntry op;

    // These refer to the number of outstanding RegistrationHandleRAII
    // for this operator. def_count reflects only def() registrations
    // (in the new world, this should only ever be 1, but old style
    // registrations may register the schema multiple times, which
    // will increase this count). def_and_impl_count reflects the number
    // of combined def() and impl() registrations. When the last def() gets
    // unregistered, we must immediately call the Deregistered listeners, but we
    // must not actually delete the handle as there are other outstanding RAII
    // destructors which will try to destruct and they had better still have a
    // working operator handle in this case
    size_t def_count = 0;
    size_t def_and_impl_count = 0;
  };
  friend class OperatorHandle;
  template<class> friend class TypedOperatorHandle;

  // Shared liveness flag + mutex; see guard_ below.
  struct Guard final {
    Guard() : alive(true), mutex() {}
    std::atomic<bool> alive;
    std::mutex mutex;
  };

public:
  ~Dispatcher();

  // Implementation note: this class abstracts over the fact that we have per-operator
  // dispatch tables. This could be easily adjusted to have a single global hash
  // table.
  static Dispatcher& realSingleton();

  C10_ALWAYS_INLINE static Dispatcher& singleton() {
#if !defined C10_MOBILE
    // Implemented inline so that steady-state code needn't incur
    // function-call overhead. We can't just inline `realSingleton`
    // because the function-local static would get duplicated across
    // all DSOs that include & use this header, leading to multiple
    // singleton instances.
    static Dispatcher& s = realSingleton();
    return s;
#else
    // For C10_MOBILE, we should never inline a static function that
    // has a static member, since the generated code calls
    // __cxa_guard_acquire and __cxa_guard_release which help
    // implement exactly once semantics for the initialization of the
    // static Dispatcher& s above (for the non-mobile case). That
    // additional code when duplicated across all operator stubs
    // for every backend results in a lot of additional code
    // being generated by the compiler.
    return realSingleton();
#endif
  }

  // ------------------------------------------------------------------------
  //
  // Accessing operators by schema
  //
  // ------------------------------------------------------------------------

  /**
   * Looks for an operator schema with the given name and overload name
   * and returns it if it is registered WITH A SCHEMA.
   * Returns nullopt otherwise.
   */
  std::optional<OperatorHandle> findSchema(const OperatorName& operator_name);

  /**
   * Variant of findSchema that results in less code generated at the call site.
   * It (1) takes const char* pointer rather than OperatorName (so we skip
   * generating std::string constructor calls at the call site), and (2)
   * it raises an exception if the operator is not found (so we skip
   * generating exception raising code at the call site)
   *
   * Irritatingly, we still have to generate the handful of instructions
   * for dealing with an exception being thrown during static initialization
   * (e.g. __cxa_guard_abort). If we could annotate this method noexcept we
   * could avoid this code too, but as the name of the function suggests,
   * it does throw exceptions.
   */
  OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);

  // Like findSchema, but also returns OperatorHandle even if there is no schema
  std::optional<OperatorHandle> findOp(const OperatorName& operator_name);

  // Returns a list of all operator names present in the operatorLookupTable_
  const std::vector<OperatorName> getAllOpNames();

  // ------------------------------------------------------------------------
  //
  // Invoking operators
  //
  // ------------------------------------------------------------------------

  // Invoke an operator via the typed (unboxed) calling convention.
  template<class Return, class... Args>
  Return call(const TypedOperatorHandle<Return (Args...)>& op, Args... args) const;

  // Out-of-line slow path of call(); receives the active at::StepCallbacks
  // and the already-selected kernel explicitly.
  template<class Return, class... Args>
  static Return callWithDispatchKeySlowPath(const TypedOperatorHandle<Return (Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args);

  // Like call, but intended for use in a redispatch in kernels that have explicitly performed the DispatchKey update calculation.
  // This will take the DispatchKeySet completely as is and dispatch to the kernel of the corresponding highest priority key in the set.
  // Note that this version of redispatch treats the inputted DispatchKeySet *as is*, and does NOT mask out the highest priority key.
  // See Note [Plumbing Keys Through The Dispatcher]
  template<class Return, class... Args>
  Return redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const;

  // Invoke an operator via the boxed calling convention using an IValue stack
  void callBoxed(const OperatorHandle& op, Stack* stack) const;
  void callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const;

  // TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
  // See Note [Plumbing Keys Through The Dispatcher]
  void redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const;

  // Whether a valid backend fallback kernel is registered for `dk`.
  bool hasBackendFallbackForDispatchKey(DispatchKey dk) {
    auto dispatch_ix = getDispatchTableIndexForDispatchKey(dk);
    if (dispatch_ix < 0) return false;
    return backendFallbackKernels_[dispatch_ix].kernel.isValid();
  }

  // Used by torchdeploy/multipy for multiple interpreters racing.
  void waitForDef(const FunctionSchema& schema);
  void waitForImpl(const OperatorName& op_name, std::optional<DispatchKey> dispatch_key);

  // ------------------------------------------------------------------------
  //
  // Performing registrations (NON user public; use op_registration)
  //
  // ------------------------------------------------------------------------

  /**
   * Register a new operator schema.
   *
   * If a schema with the same operator name and overload name already exists,
   * this function will check that both schemas are exactly identical.
   */
  RegistrationHandleRAII registerDef(FunctionSchema schema, std::string debug, std::vector<at::Tag> tags = {});

  /**
   * Register a kernel to the dispatch table for an operator.
   * If dispatch_key is nullopt, then this registers a fallback kernel.
   *
   * @return A RAII object that manages the lifetime of the registration.
   *         Once that object is destructed, the kernel will be deregistered.
   */
  // NB: steals the inferred function schema, as we may need to hold on to
  // it for a bit until the real schema turns up
  RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional<DispatchKey> dispatch_key, KernelFunction kernel, std::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema> inferred_function_schema, std::string debug);

  /**
   * Given an operator, tells the Dispatcher that we have implemented a fake impl
   * for this op in the given Python module. Call this a "pystub".
   */
  RegistrationHandleRAII registerPythonModule(const OperatorName& op_name, const char* pymodule, const char* context);

  /**
   * Given an operator, throws if we have a pystub.
   */
  void throwIfHasPythonModule(OperatorName op_name);

  // Returns the (pymodule, context) pystub pair for the op, if one exists.
  std::optional<std::pair<const char*, const char*>> getPyStub(OperatorName op_name);

  /**
   * Register a new operator by name.
   */
  RegistrationHandleRAII registerName(OperatorName op_name);

  /**
   * Register a fallback kernel for a backend.
   * If an operator is called but there is no concrete kernel for the dispatch
   * key of the given operator arguments, it will check if there is such a
   * fallback kernel for the given dispatch key and, if yes, call that one.
   */
  RegistrationHandleRAII registerFallback(DispatchKey dispatch_key, KernelFunction kernel, std::string debug);

  /**
   * Use to register whenever we had a TORCH_LIBRARY declaration in the frontend
   * API. These invocations are only permitted once per program, so we raise
   * an error if this is called again for the same namespace.
   */
  RegistrationHandleRAII registerLibrary(std::string ns, std::string debug);

  // ------------------------------------------------------------------------
  //
  // Listeners on registrations
  //
  // ------------------------------------------------------------------------

  /**
   * Add a listener that gets called whenever a new op is registered or an existing
   * op is deregistered. Immediately after registering, this listener gets called
   * for all previously registered ops, so it can be used to keep track of ops
   * registered with this dispatcher.
   */
  RegistrationHandleRAII addRegistrationListener(std::unique_ptr<OpRegistrationListener> listener);

  void checkInvariants() const;

  //
  // ------------------------------------------------------------------------
  //
  // Assertions
  //
  // ------------------------------------------------------------------------

  /**
   * For testing purposes.
   * Returns a list of all operators that were created through calls to registerImpl(),
   * without any corresponding calls to registerDef(). After static initialization
   * is done this is almost certainly a bug, as the created OperatorHandle won't have
   * any schema associated with it and users calling the op through the dispatcher
   * won't be able to access it
   *
   * Note that we cannot enforce this invariant "as we go" during static initialization,
   * due to undefined static initialization order- we have no guarantees over the order
   * in which .def() and .impl() calls are registered in the dispatcher at static
   * initialization time. So this function should only be called after static initialization.
   */
  std::vector<OperatorHandle> findDanglingImpls() const;

  /**
   * Useful for inspecting global Dispatcher registration state.
   * Returns the names of all operators with a kernel registered for the specified DispatchKey.
   * If no DispatchKey is specified, it returns all registered operators.
   */
  std::vector<OperatorName> getRegistrationsForDispatchKey(std::optional<DispatchKey> k) const;

private:
  Dispatcher();

  // Helpers for driving at::RecordFunction profiling callbacks around a
  // dispatched call.
  static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey, DispatchKeySet dispatchKeySet);
  static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet);
  static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet, c10::ArrayRef<const c10::IValue> args);

#ifdef FBCODE_CAFFE2
  static bool profilingOperatorEvents();
  static void fireOpStartUSDT(at::RecordFunction::schema_ref_t schema_ref);
  static void fireOpEndUSDT(at::RecordFunction::schema_ref_t schema_ref);
#endif // FBCODE_CAFFE2

  OperatorHandle findOrRegisterSchema_(FunctionSchema&& schema);
  OperatorHandle findOrRegisterName_(const OperatorName& op_name);

  // Deregistration counterparts of the register* methods above; invoked by
  // the returned RegistrationHandleRAII objects.
  void deregisterDef_(const OperatorHandle& op, const OperatorName& op_name);
  void deregisterImpl_(
    const OperatorHandle& op,
    const OperatorName& op_name,
    std::optional<DispatchKey> dispatch_key,
    impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle);
  void deregisterName_(const OperatorHandle& op, const OperatorName& op_name);
  void deregisterFallback_(DispatchKey dispatchKey);
  void deregisterLibrary_(const std::string& ns);
  void cleanup(const OperatorHandle& op, const OperatorName& op_name);
  void checkSchemaCompatibility(const OperatorHandle& op, const FunctionSchema& schema, const std::string& debug);

  // std::list gives stable addresses for OperatorDef, which OperatorHandle
  // relies on (it stores a pointer/iterator into this list).
  std::list<OperatorDef> operators_;
#if !defined(C10_MOBILE)
  LeftRight<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
#else
  RWSafeLeftRightWrapper<ska::flat_hash_map<OperatorName, OperatorHandle>> operatorLookupTable_;
#endif
  // Map from namespace to debug string (saying, e.g., where the library was defined)
  ska::flat_hash_map<std::string, std::string> libraries_;

  std::array<impl::AnnotatedKernel, num_runtime_entries> backendFallbackKernels_;

  std::unique_ptr<detail::RegistrationListenerList> listeners_;

  // This condition variable gets notified whenever we add a new def/impl to the
  // dispatch table. This is primarily used by multipy/torchdeploy, when
  // we have multiple interpreters trying to register to the dispatch table.
  // In this situation, whenever the non-primary interpreter would have tried
  // to register to the dispatch table, instead it will check to see if the
  // expected registration has already been made, and if it hasn't, wait on
  // this condition variable to see if it was just racing with the primary
  // interpreter.
  //
  // We expect it to be rare for there to be any waiters on this condition
  // variable. This is mostly just to help give better diagnostics if
  // something goes horribly wrong
  std::condition_variable cond_var_;

  // Protect concurrent access to the dispatcher. We store this in a
  // `shared_ptr` as we return callbacks that call back into dispatcher methods,
  // and we need to be able to handle and guard against the event when the
  // `Dispatcher` has been destroyed before the callbacks fire.
  std::shared_ptr<Guard> guard_;
};
|
| 365 |
+
|
| 366 |
+
/**
|
| 367 |
+
* This is a handle to an operator schema registered with the dispatcher.
|
| 368 |
+
* This handle can be used to register kernels with the dispatcher or
|
| 369 |
+
* to lookup a kernel for a certain set of arguments.
|
| 370 |
+
*/
|
| 371 |
+
class TORCH_API OperatorHandle {
|
| 372 |
+
template <typename T> friend struct std::hash;
|
| 373 |
+
|
| 374 |
+
public:
|
| 375 |
+
OperatorHandle(OperatorHandle&&) noexcept = default;
|
| 376 |
+
OperatorHandle& operator=(OperatorHandle&&) noexcept = default;
|
| 377 |
+
OperatorHandle(const OperatorHandle&) = default;
|
| 378 |
+
OperatorHandle& operator=(const OperatorHandle&) = default;
|
| 379 |
+
// NOLINTNEXTLINE(performance-trivially-destructible)
|
| 380 |
+
~OperatorHandle();
|
| 381 |
+
|
| 382 |
+
  // Name this operator is registered under (delegates to OperatorEntry).
  const OperatorName& operator_name() const {
    return operatorDef_->op.operator_name();
  }
|
| 385 |
+
|
| 386 |
+
  // Whether a schema has been registered for this operator.
  bool hasSchema() const {
    return operatorDef_->op.hasSchema();
  }
|
| 389 |
+
|
| 390 |
+
  // The registered schema; presumably only valid when hasSchema() is true
  // (enforced by OperatorEntry — TODO confirm).
  const FunctionSchema& schema() const {
    return operatorDef_->op.schema();
  }
|
| 393 |
+
|
| 394 |
+
  // Debug string recorded for this operator at registration time.
  const std::string& debug() const {
    return operatorDef_->op.debug();
  }
|
| 397 |
+
|
| 398 |
+
  // Human-readable dump of the underlying OperatorEntry's state.
  std::string dumpState() const {
    return operatorDef_->op.dumpState();
  }
|
| 401 |
+
|
| 402 |
+
  // Whether a kernel is registered for dispatch key `k`.
  bool hasKernelForDispatchKey(DispatchKey k) const {
    return operatorDef_->op.hasKernelForDispatchKey(k);
  }
|
| 405 |
+
|
| 406 |
+
  // Whether the kernel registered for `k` is a fallthrough kernel.
  bool isKernelFallthroughKernel(DispatchKey k) const {
    return operatorDef_->op.kernelForDispatchKey(k).isFallthrough();
  }
|
| 409 |
+
|
| 410 |
+
bool hasKernelForAnyDispatchKey(DispatchKeySet k) const {
|
| 411 |
+
return operatorDef_->op.hasKernelForAnyDispatchKey(k);
|
| 412 |
+
}
|
| 413 |
+
|
| 414 |
+
bool hasComputedKernelForDispatchKey(DispatchKey k) const {
|
| 415 |
+
return operatorDef_->op.hasComputedKernelForDispatchKey(k);
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
std::string dumpComputedTable() const {
|
| 419 |
+
return operatorDef_->op.dumpComputedTable();
|
| 420 |
+
}
|
| 421 |
+
|
| 422 |
+
void checkInvariants() const {
|
| 423 |
+
return operatorDef_->op.checkInvariants();
|
| 424 |
+
}
|
| 425 |
+
|
| 426 |
+
c10::ArrayRef<at::Tag> getTags() const {
|
| 427 |
+
return operatorDef_->op.getTags();
|
| 428 |
+
}
|
| 429 |
+
|
| 430 |
+
void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback) {
|
| 431 |
+
operatorDef_->op.setReportErrorCallback_(std::move(callback));
|
| 432 |
+
}
|
| 433 |
+
|
| 434 |
+
bool hasTag(const at::Tag& tag) const {
|
| 435 |
+
for(const auto& tag_: getTags()) {
|
| 436 |
+
if (tag == tag_) {
|
| 437 |
+
return true;
|
| 438 |
+
}
|
| 439 |
+
}
|
| 440 |
+
return false;
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
template<class FuncType>
|
| 444 |
+
TypedOperatorHandle<FuncType> typed() const {
|
| 445 |
+
// NB: This assert is not 100% sound: you can retrieve a typed() operator
|
| 446 |
+
// handle prior to ANY C++ signature being registered on the operator
|
| 447 |
+
// and the check will say everything is OK (at which point you can then
|
| 448 |
+
// smuggle in a kernel that is typed incorrectly). For everything
|
| 449 |
+
// in core library this won't happen, because all the static registrations
|
| 450 |
+
// will be done by the time a typed() handle is acquired.
|
| 451 |
+
#if !defined C10_MOBILE
|
| 452 |
+
operatorDef_->op.assertSignatureIsCorrect<FuncType>();
|
| 453 |
+
if (fn_has_symint<FuncType>::value) {
|
| 454 |
+
operatorDef_->op.assertSignatureIsCorrect<typename fn_remove_symint<FuncType>::type>();
|
| 455 |
+
}
|
| 456 |
+
#endif
|
| 457 |
+
return TypedOperatorHandle<FuncType>(operatorIterator_);
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
void callBoxed(Stack* stack) const {
|
| 461 |
+
c10::Dispatcher::singleton().callBoxed(*this, stack);
|
| 462 |
+
}
|
| 463 |
+
|
| 464 |
+
void callBoxed(Stack& stack) const {
|
| 465 |
+
callBoxed(&stack);
|
| 466 |
+
}
|
| 467 |
+
|
| 468 |
+
void callBoxedForDispatchKey(DispatchKey dk, Stack& stack) const {
|
| 469 |
+
c10::Dispatcher::singleton().callBoxedForDispatchKey(*this, dk, &stack);
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
void redispatchBoxed(DispatchKeySet ks, Stack* stack) const {
|
| 473 |
+
c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack);
|
| 474 |
+
}
|
| 475 |
+
|
| 476 |
+
template <typename F>
|
| 477 |
+
PyObject* getPythonOp(c10::impl::PyInterpreter* self_interpreter, F slow_accessor) const {
|
| 478 |
+
return operatorDef_->op.getPythonOp(self_interpreter, slow_accessor);
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
bool operator==(const OperatorHandle& other) const {
|
| 482 |
+
return operatorDef_ == other.operatorDef_;
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
bool operator!=(const OperatorHandle& other) const {
|
| 486 |
+
return operatorDef_ != other.operatorDef_;
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
private:
|
| 490 |
+
explicit OperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
|
| 491 |
+
: operatorDef_(&*operatorIterator), operatorIterator_(operatorIterator) {}
|
| 492 |
+
friend class Dispatcher;
|
| 493 |
+
template<class> friend class TypedOperatorHandle;
|
| 494 |
+
|
| 495 |
+
// Storing a direct pointer to the OperatorDef even though we
|
| 496 |
+
// already have the iterator saves an instruction in the critical
|
| 497 |
+
// dispatch path. The iterator is effectively a
|
| 498 |
+
// pointer-to-std::list-node, and (at least in libstdc++'s
|
| 499 |
+
// implementation) the element is at an offset 16 bytes from that,
|
| 500 |
+
// because the prev/next pointers come first in the list node
|
| 501 |
+
// struct. So, an add instruction would be necessary to convert from the
|
| 502 |
+
// iterator to an OperatorDef*.
|
| 503 |
+
Dispatcher::OperatorDef* operatorDef_;
|
| 504 |
+
|
| 505 |
+
// We need to store this iterator in order to make
|
| 506 |
+
// Dispatcher::cleanup() fast -- it runs a lot on program
|
| 507 |
+
// termination (and presuambly library unloading).
|
| 508 |
+
std::list<Dispatcher::OperatorDef>::iterator operatorIterator_;
|
| 509 |
+
};
|
| 510 |
+
|
| 511 |
+
/**
|
| 512 |
+
* This is a handle to an operator schema registered with the dispatcher.
|
| 513 |
+
* It holds the same information as an OperatorHandle, but it is templated
|
| 514 |
+
* on the operator arguments and allows calling the operator in an
|
| 515 |
+
* unboxed way.
|
| 516 |
+
*/
|
| 517 |
+
template<class FuncType>
|
| 518 |
+
class TypedOperatorHandle final {
|
| 519 |
+
static_assert(guts::false_t<FuncType>(), "FuncType in OperatorHandle::typed<FuncType> was not a valid function type");
|
| 520 |
+
};
|
| 521 |
+
template<class Return, class... Args>
|
| 522 |
+
class TypedOperatorHandle<Return (Args...)> final : public OperatorHandle {
|
| 523 |
+
public:
|
| 524 |
+
TypedOperatorHandle(TypedOperatorHandle&&) noexcept = default;
|
| 525 |
+
TypedOperatorHandle& operator=(TypedOperatorHandle&&) noexcept = default;
|
| 526 |
+
TypedOperatorHandle(const TypedOperatorHandle&) = default;
|
| 527 |
+
TypedOperatorHandle& operator=(const TypedOperatorHandle&) = default;
|
| 528 |
+
|
| 529 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 530 |
+
C10_ALWAYS_INLINE Return call(Args... args) const {
|
| 531 |
+
return c10::Dispatcher::singleton().call<Return, Args...>(*this, std::forward<Args>(args)...);
|
| 532 |
+
}
|
| 533 |
+
|
| 534 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 535 |
+
C10_ALWAYS_INLINE Return redispatch(DispatchKeySet currentDispatchKeySet, Args... args) const {
|
| 536 |
+
return c10::Dispatcher::singleton().redispatch<Return, Args...>(*this, currentDispatchKeySet, std::forward<Args>(args)...);
|
| 537 |
+
}
|
| 538 |
+
|
| 539 |
+
private:
|
| 540 |
+
explicit TypedOperatorHandle(std::list<Dispatcher::OperatorDef>::iterator operatorIterator)
|
| 541 |
+
: OperatorHandle(operatorIterator) {}
|
| 542 |
+
friend class OperatorHandle;
|
| 543 |
+
};
|
| 544 |
+
|
| 545 |
+
namespace detail {
|
| 546 |
+
template <class... Args> inline void unused_arg_(const Args&...) {}
|
| 547 |
+
|
| 548 |
+
// CaptureKernelCall is intended to capture return values from Dispatcher
|
| 549 |
+
// unboxed kernel calls. A record function may request to get outputs from the
|
| 550 |
+
// kernel calls. For boxed kernels, it's straightforward, the returned values
|
| 551 |
+
// are in the stack object. The stack can be passed to record functions. For
|
| 552 |
+
// unboxed kernels, we need to handle different kinds of return values, cache
|
| 553 |
+
// them temporarily, then release the values for the actual function call
|
| 554 |
+
// return.
|
| 555 |
+
template <typename ReturnType>
|
| 556 |
+
struct CaptureKernelCall {
|
| 557 |
+
template <typename F, typename... Args>
|
| 558 |
+
CaptureKernelCall(
|
| 559 |
+
const F& kernel,
|
| 560 |
+
const TypedOperatorHandle<ReturnType(Args...)>& op,
|
| 561 |
+
const DispatchKeySet& dispatchKeySet,
|
| 562 |
+
Args&&... args)
|
| 563 |
+
// Calls the kernel and capture the result in output_.
|
| 564 |
+
: output_{kernel.template call<ReturnType, Args...>(
|
| 565 |
+
op,
|
| 566 |
+
dispatchKeySet,
|
| 567 |
+
std::forward<Args>(args)...)} {}
|
| 568 |
+
// Wraps the return values in a Stack.
|
| 569 |
+
Stack getOutputs() {
|
| 570 |
+
Stack stack;
|
| 571 |
+
impl::push_outputs<ReturnType, false>::copy(output_, &stack);
|
| 572 |
+
return stack;
|
| 573 |
+
}
|
| 574 |
+
// Since we are returning the output_, we don't expect the output_ to be used
|
| 575 |
+
// afterward. Copy elision and RVO do not apply to class data members. Using
|
| 576 |
+
// move semantic to avoid copies when possible.
|
| 577 |
+
ReturnType release() && {
|
| 578 |
+
return std::move(output_);
|
| 579 |
+
}
|
| 580 |
+
|
| 581 |
+
private:
|
| 582 |
+
ReturnType output_;
|
| 583 |
+
};
|
| 584 |
+
|
| 585 |
+
// Handle the lvalue reference differently since it should not be moved.
|
| 586 |
+
template <>
|
| 587 |
+
inline at::Tensor& CaptureKernelCall<at::Tensor&>::release() && {
|
| 588 |
+
return output_;
|
| 589 |
+
}
|
| 590 |
+
|
| 591 |
+
// Handle case where the kernel returns void.
|
| 592 |
+
template <>
|
| 593 |
+
struct CaptureKernelCall<void> {
|
| 594 |
+
template <typename F, typename... Args>
|
| 595 |
+
CaptureKernelCall(
|
| 596 |
+
const F& kernel,
|
| 597 |
+
const TypedOperatorHandle<void(Args...)>& op,
|
| 598 |
+
const DispatchKeySet& dispatchKeySet,
|
| 599 |
+
Args&&... args) {
|
| 600 |
+
// Calling the kernel and no need to capture void.
|
| 601 |
+
kernel.template call<void, Args...>(
|
| 602 |
+
op, dispatchKeySet, std::forward<Args>(args)...);
|
| 603 |
+
}
|
| 604 |
+
Stack getOutputs() {
|
| 605 |
+
return Stack();
|
| 606 |
+
}
|
| 607 |
+
void release() && {}
|
| 608 |
+
};
|
| 609 |
+
|
| 610 |
+
} // namespace detail
|
| 611 |
+
|
| 612 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 613 |
+
template<class Return, class... Args>
|
| 614 |
+
inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle<Return(Args...)>& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args) {
|
| 615 |
+
// If callbacks need inputs, we box the arguments and pass them to the guard.
|
| 616 |
+
// Note: For perf reasons we wouldn't want to prematurely box the arguments.
|
| 617 |
+
at::RecordFunction guard(std::move(stepCallbacks));
|
| 618 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(op.operatorDef_->op.isObserved());
|
| 619 |
+
auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
|
| 620 |
+
auto& schema = op.schema();
|
| 621 |
+
auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
|
| 622 |
+
constexpr auto num_boxed_args = impl::boxed_size<Args...>();
|
| 623 |
+
if constexpr (num_boxed_args != 0) {
|
| 624 |
+
if (guard.needsInputs()) {
|
| 625 |
+
// If we used std::array<IValue, num_boxed_args> here, we would
|
| 626 |
+
// have to spend time default constructing the IValues in
|
| 627 |
+
// boxedArgs. aligned_storage has no such requirement.
|
| 628 |
+
impl::IValueAlignedStorage boxedArgs[num_boxed_args];
|
| 629 |
+
// For debugging only; could be removed (but the compiler will do
|
| 630 |
+
// that for us and it's nice to have the extra assurance of
|
| 631 |
+
// correctness from our debug builds).
|
| 632 |
+
int lastArgIdx = 0;
|
| 633 |
+
impl::boxArgsToStack(boxedArgs, lastArgIdx, args...);
|
| 634 |
+
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args);
|
| 635 |
+
// I don't *think* we need std::launder here, because IValue has
|
| 636 |
+
// no subclasses and no const or reference fields.
|
| 637 |
+
runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef<const c10::IValue>(reinterpret_cast<IValue *>(boxedArgs), num_boxed_args));
|
| 638 |
+
for (size_t ii = 0; ii < num_boxed_args; ++ii) {
|
| 639 |
+
reinterpret_cast<IValue *>(&boxedArgs[ii])->~IValue();
|
| 640 |
+
}
|
| 641 |
+
} else {
|
| 642 |
+
runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);
|
| 643 |
+
}
|
| 644 |
+
} else {
|
| 645 |
+
runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);
|
| 646 |
+
}
|
| 647 |
+
|
| 648 |
+
if (C10_UNLIKELY(guard.needsOutputs())) {
|
| 649 |
+
// Calls the kernel and capture the output temporarily to pass to
|
| 650 |
+
// RecordFunction.
|
| 651 |
+
detail::CaptureKernelCall<Return> captureKernelCall(
|
| 652 |
+
kernel, op, dispatchKeySet, std::forward<Args>(args)...);
|
| 653 |
+
guard.setOutputs(captureKernelCall.getOutputs());
|
| 654 |
+
// Releases the captured output to return to caller.
|
| 655 |
+
return std::move(captureKernelCall).release();
|
| 656 |
+
}
|
| 657 |
+
|
| 658 |
+
// keeping the guard alive while executing the kernel
|
| 659 |
+
return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
|
| 660 |
+
}
|
| 661 |
+
|
| 662 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 663 |
+
template<class Return, class... Args>
|
| 664 |
+
C10_ALWAYS_INLINE_UNLESS_MOBILE Return Dispatcher::call(const TypedOperatorHandle<Return(Args...)>& op, Args... args) const {
|
| 665 |
+
detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
|
| 666 |
+
auto dispatchKeySet = op.operatorDef_->op.dispatchKeyExtractor()
|
| 667 |
+
.template getDispatchKeySetUnboxed<Args...>(args...);
|
| 668 |
+
#ifndef NDEBUG
|
| 669 |
+
DispatchTraceNestingGuard debug_guard;
|
| 670 |
+
if (show_dispatch_trace()) {
|
| 671 |
+
auto nesting_value = dispatch_trace_nesting_value();
|
| 672 |
+
for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
|
| 673 |
+
std::cerr << "[call] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
|
| 674 |
+
}
|
| 675 |
+
#endif
|
| 676 |
+
const KernelFunction& kernel = op.operatorDef_->op.lookup(dispatchKeySet);
|
| 677 |
+
#ifndef PYTORCH_DISABLE_PER_OP_PROFILING
|
| 678 |
+
auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
|
| 679 |
+
if (C10_UNLIKELY(step_callbacks.has_value() && op.operatorDef_->op.isObserved())) {
|
| 680 |
+
return callWithDispatchKeySlowPath<Return, Args...>(op, *step_callbacks, dispatchKeySet, kernel, std::forward<Args>(args)...);
|
| 681 |
+
}
|
| 682 |
+
#endif // PYTORCH_DISABLE_PER_OP_PROFILING
|
| 683 |
+
|
| 684 |
+
#ifdef FBCODE_CAFFE2
|
| 685 |
+
if(profilingOperatorEvents()) {
|
| 686 |
+
struct FireOpRAII {
|
| 687 |
+
FireOpRAII(at::RecordFunction::schema_ref_t schema_ref) : schema_ref_(schema_ref) {
|
| 688 |
+
fireOpStartUSDT(schema_ref);
|
| 689 |
+
}
|
| 690 |
+
~FireOpRAII() { fireOpEndUSDT(schema_ref_); }
|
| 691 |
+
at::RecordFunction::schema_ref_t schema_ref_;
|
| 692 |
+
} event(op.schema());
|
| 693 |
+
return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
|
| 694 |
+
} else {
|
| 695 |
+
return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
|
| 696 |
+
}
|
| 697 |
+
#else
|
| 698 |
+
return kernel.template call<Return, Args...>(op, dispatchKeySet, std::forward<Args>(args)...);
|
| 699 |
+
#endif // FBCODE_CAFFE2
|
| 700 |
+
}
|
| 701 |
+
|
| 702 |
+
// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
|
| 703 |
+
template<class Return, class... Args>
|
| 704 |
+
inline Return Dispatcher::redispatch(const TypedOperatorHandle<Return (Args...)>& op, DispatchKeySet currentDispatchKeySet, Args... args) const {
|
| 705 |
+
detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
|
| 706 |
+
// do not use RecordFunction on redispatch
|
| 707 |
+
#ifndef NDEBUG
|
| 708 |
+
DispatchTraceNestingGuard debug_guard;
|
| 709 |
+
if (show_dispatch_trace()) {
|
| 710 |
+
auto nesting_value = dispatch_trace_nesting_value();
|
| 711 |
+
for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
|
| 712 |
+
std::cerr << "[redispatch] op=[" << op.operator_name() << "], key=[" << toString(currentDispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
|
| 713 |
+
}
|
| 714 |
+
#endif
|
| 715 |
+
const KernelFunction& kernel = op.operatorDef_->op.lookup(currentDispatchKeySet);
|
| 716 |
+
return kernel.template call<Return, Args...>(op, currentDispatchKeySet, std::forward<Args>(args)...);
|
| 717 |
+
}
|
| 718 |
+
|
| 719 |
+
inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const {
|
| 720 |
+
// note: this doesn't need the mutex because write operations on the list keep iterators intact.
|
| 721 |
+
const auto& entry = op.operatorDef_->op;
|
| 722 |
+
auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
|
| 723 |
+
#ifndef NDEBUG
|
| 724 |
+
DispatchTraceNestingGuard debug_guard;
|
| 725 |
+
if (show_dispatch_trace()) {
|
| 726 |
+
auto nesting_value = dispatch_trace_nesting_value();
|
| 727 |
+
for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
|
| 728 |
+
std::cerr << "[callBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
|
| 729 |
+
}
|
| 730 |
+
#endif
|
| 731 |
+
const auto& kernel = entry.lookup(dispatchKeySet);
|
| 732 |
+
#ifndef PYTORCH_DISABLE_PER_OP_PROFILING
|
| 733 |
+
auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION);
|
| 734 |
+
if (C10_UNLIKELY(step_callbacks.has_value() && entry.isObserved())) {
|
| 735 |
+
at::RecordFunction guard(std::move(*step_callbacks));
|
| 736 |
+
auto dispatchKey = dispatchKeySet.highestPriorityTypeId();
|
| 737 |
+
auto& schema = op.schema();
|
| 738 |
+
auto schema_ref = std::reference_wrapper<const FunctionSchema>(schema);
|
| 739 |
+
guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef<const c10::IValue>(stack->data(), stack->size()))
|
| 740 |
+
: runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet);
|
| 741 |
+
|
| 742 |
+
// keeping the guard alive while executing the kernel
|
| 743 |
+
kernel.callBoxed(op, dispatchKeySet, stack);
|
| 744 |
+
|
| 745 |
+
if (C10_UNLIKELY(guard.needsOutputs())) {
|
| 746 |
+
guard.setOutputs(*stack);
|
| 747 |
+
}
|
| 748 |
+
return;
|
| 749 |
+
}
|
| 750 |
+
#endif // PYTORCH_DISABLE_PER_OP_PROFILING
|
| 751 |
+
kernel.callBoxed(op, dispatchKeySet, stack);
|
| 752 |
+
}
|
| 753 |
+
|
| 754 |
+
// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation
|
| 755 |
+
inline void Dispatcher::callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const {
|
| 756 |
+
// note: this doesn't need the mutex because write operations on the list keep iterators intact.
|
| 757 |
+
const auto& entry = op.operatorDef_->op;
|
| 758 |
+
// We still compute this as we're obligated to pass it on to the internal
|
| 759 |
+
// kernel, if it is a boxed fallback
|
| 760 |
+
auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack);
|
| 761 |
+
const auto& kernel = ([&]() {
|
| 762 |
+
if (op.hasKernelForDispatchKey(dk)) {
|
| 763 |
+
return entry.kernelForDispatchKey(dk);
|
| 764 |
+
} else {
|
| 765 |
+
auto idx = getDispatchTableIndexForDispatchKey(dk);
|
| 766 |
+
TORCH_INTERNAL_ASSERT(idx >= 0);
|
| 767 |
+
return backendFallbackKernels_[idx].kernel;
|
| 768 |
+
}
|
| 769 |
+
})();
|
| 770 |
+
kernel.callBoxed(op, dispatchKeySet, stack);
|
| 771 |
+
}
|
| 772 |
+
|
| 773 |
+
inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const {
|
| 774 |
+
// note: this doesn't need the mutex because write operations on the list keep iterators intact.
|
| 775 |
+
const auto& entry = op.operatorDef_->op;
|
| 776 |
+
#ifndef NDEBUG
|
| 777 |
+
DispatchTraceNestingGuard debug_guard;
|
| 778 |
+
if (show_dispatch_trace()) {
|
| 779 |
+
auto nesting_value = dispatch_trace_nesting_value();
|
| 780 |
+
for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " ";
|
| 781 |
+
std::cerr << "[redispatchBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl;
|
| 782 |
+
}
|
| 783 |
+
#endif
|
| 784 |
+
const auto& kernel = entry.lookup(dispatchKeySet);
|
| 785 |
+
return kernel.callBoxed(op, dispatchKeySet, stack);
|
| 786 |
+
}
|
| 787 |
+
|
| 788 |
+
} // namespace c10
|
| 789 |
+
|
| 790 |
+
namespace std {
|
| 791 |
+
|
| 792 |
+
template <>
|
| 793 |
+
struct hash<c10::OperatorHandle> {
|
| 794 |
+
size_t operator()(const c10::OperatorHandle& op) const noexcept {
|
| 795 |
+
return std::hash<void*>{}(static_cast<void*>(op.operatorDef_));
|
| 796 |
+
}
|
| 797 |
+
};
|
| 798 |
+
|
| 799 |
+
} // namespace std
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/operator_name.h>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <unordered_set>
|
| 6 |
+
|
| 7 |
+
namespace c10 {
|
| 8 |
+
|
| 9 |
+
struct TORCH_API ObservedOperators {
|
| 10 |
+
ObservedOperators() = delete;
|
| 11 |
+
|
| 12 |
+
static bool isObserved(const OperatorName& name);
|
| 13 |
+
|
| 14 |
+
static std::unordered_set<std::string>& getUnobservedOperatorList();
|
| 15 |
+
};
|
| 16 |
+
|
| 17 |
+
} // namespace c10
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/function_schema.h>
|
| 4 |
+
#include <c10/util/Metaprogramming.h>
|
| 5 |
+
#include <c10/util/flat_hash_map.h>
|
| 6 |
+
#include <c10/util/Optional.h>
|
| 7 |
+
#include <c10/core/DispatchKey.h>
|
| 8 |
+
#include <c10/core/PyHandleCache.h>
|
| 9 |
+
#include <c10/core/SafePyObject.h>
|
| 10 |
+
#include <ATen/core/ivalue.h>
|
| 11 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
| 12 |
+
#include <ATen/core/dispatch/DispatchKeyExtractor.h>
|
| 13 |
+
|
| 14 |
+
#include <ATen/core/dispatch/OperatorOptions.h>
|
| 15 |
+
#include <ATen/core/dispatch/CppSignature.h>
|
| 16 |
+
#include <ATen/core/dispatch/RegistrationHandleRAII.h>
|
| 17 |
+
#include <ATen/core/enum_tag.h>
|
| 18 |
+
|
| 19 |
+
#include <list>
|
| 20 |
+
#include <array>
|
| 21 |
+
|
| 22 |
+
#ifdef C10_MOBILE
|
| 23 |
+
#define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 24 |
+
#endif
|
| 25 |
+
|
| 26 |
+
namespace c10 {
|
| 27 |
+
|
| 28 |
+
class Dispatcher;
|
| 29 |
+
|
| 30 |
+
namespace impl {
|
| 31 |
+
|
| 32 |
+
// This data structure represents a kernel that was registered to us from a
|
| 33 |
+
// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata
|
| 34 |
+
// about the kernel that isn't necessary for actual dispatching (this is why
|
| 35 |
+
// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for
|
| 36 |
+
// giving good error messages.
|
| 37 |
+
struct AnnotatedKernel final {
|
| 38 |
+
AnnotatedKernel(KernelFunction k, std::unique_ptr<FunctionSchema> s, std::string d)
|
| 39 |
+
: kernel(std::move(k))
|
| 40 |
+
, inferred_function_schema(std::move(s))
|
| 41 |
+
, debug(std::move(d))
|
| 42 |
+
{}
|
| 43 |
+
AnnotatedKernel() = default;
|
| 44 |
+
KernelFunction kernel;
|
| 45 |
+
std::unique_ptr<FunctionSchema> inferred_function_schema;
|
| 46 |
+
// A little debug string to help us identify the kernel in question.
|
| 47 |
+
// Most importantly it records the TORCH_LIBRARY block that did the
|
| 48 |
+
// registration.
|
| 49 |
+
std::string debug;
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
// This data structure represents operator schema, with metadata specifying
|
| 53 |
+
// where the registration of this schema occurred
|
| 54 |
+
struct AnnotatedSchema final {
|
| 55 |
+
AnnotatedSchema(FunctionSchema s, std::string d)
|
| 56 |
+
: schema(std::move(s))
|
| 57 |
+
, debug(std::move(d))
|
| 58 |
+
{}
|
| 59 |
+
FunctionSchema schema;
|
| 60 |
+
std::string debug;
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
// Internal data structure that records information about a specific operator.
|
| 64 |
+
// It's not part of the public API; typically, users will interact with
|
| 65 |
+
// OperatorHandle instead.
|
| 66 |
+
//
|
| 67 |
+
// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher
|
| 68 |
+
// lock (this is important because some methods in OperatorEntry access
|
| 69 |
+
// dispatcher state)
|
| 70 |
+
class TORCH_API OperatorEntry final {
|
| 71 |
+
public:
|
| 72 |
+
explicit OperatorEntry(OperatorName&& operator_name);
|
| 73 |
+
|
| 74 |
+
OperatorEntry(const OperatorEntry&) = delete;
|
| 75 |
+
OperatorEntry(OperatorEntry&&) noexcept = delete;
|
| 76 |
+
OperatorEntry& operator=(const OperatorEntry&) = delete;
|
| 77 |
+
OperatorEntry& operator=(OperatorEntry&&) noexcept = delete;
|
| 78 |
+
|
| 79 |
+
const FunctionSchema& schema() const {
|
| 80 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet");
|
| 81 |
+
return schema_->schema;
|
| 82 |
+
}
|
| 83 |
+
const std::string& debug() const {
|
| 84 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value());
|
| 85 |
+
return schema_->debug;
|
| 86 |
+
}
|
| 87 |
+
bool hasSchema() const {
|
| 88 |
+
return schema_.has_value();
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
bool isObserved() const {
|
| 92 |
+
return is_observed_;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
// We may allocate an OperatorEntry for an operator even when we don't
|
| 96 |
+
// have a schema. When we receive the schema registration, we post
|
| 97 |
+
// facto register a schema.
|
| 98 |
+
//
|
| 99 |
+
// NB: registerSchema/deregisterSchema are not idempotent; if you
|
| 100 |
+
// attempt to register a schema when one is already present or vice
|
| 101 |
+
// versa that is an error. (Refcounting for the registrations is
|
| 102 |
+
// handled in the OperatorHandle in Dispatcher)
|
| 103 |
+
void registerSchema(FunctionSchema&&, std::string&& debug, std::vector<at::Tag> tags = {});
|
| 104 |
+
void deregisterSchema();
|
| 105 |
+
|
| 106 |
+
const OperatorName& operator_name() const {
|
| 107 |
+
return name_;
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 111 |
+
using AnnotatedKernelContainer = std::array<AnnotatedKernel, 1>;
|
| 112 |
+
#else
|
| 113 |
+
using AnnotatedKernelContainer = std::list<AnnotatedKernel>;
|
| 114 |
+
#endif
|
| 115 |
+
using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator;
|
| 116 |
+
|
| 117 |
+
// Why are kernels and fallback asymmetric? It has to do with ownership.
|
| 118 |
+
// Kernels and the computed dispatch tables for them are canonically
|
| 119 |
+
// owned by OperatorEntry, but backend fallbacks are specified once
|
| 120 |
+
// and apply for all operators, so they should be owned by Dispatcher.
|
| 121 |
+
// However, the registration of a backend fallback affects the
|
| 122 |
+
// state of the computed dispatch table, so when a backend fallback
|
| 123 |
+
// is updated, we need to update the operator tables too. Thus,
|
| 124 |
+
// registerKernel is the mechanism by which we give kernels to
|
| 125 |
+
// operator entry to own (and update dispatch table), but we only
|
| 126 |
+
// need a non-owning mechanism to update fallback.
|
| 127 |
+
|
| 128 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 129 |
+
// Postcondition: caller is responsible for disposing of the kernel
|
| 130 |
+
AnnotatedKernelContainerIterator registerKernel(
|
| 131 |
+
const Dispatcher& dispatcher,
|
| 132 |
+
std::optional<DispatchKey> dispatch_key,
|
| 133 |
+
KernelFunction kernel,
|
| 134 |
+
std::optional<CppSignature> cpp_signature,
|
| 135 |
+
std::unique_ptr<FunctionSchema> inferred_function_schema,
|
| 136 |
+
std::string debug
|
| 137 |
+
);
|
| 138 |
+
|
| 139 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 140 |
+
void deregisterKernel_(
|
| 141 |
+
const Dispatcher& dispatcher,
|
| 142 |
+
std::optional<DispatchKey> dispatch_key,
|
| 143 |
+
AnnotatedKernelContainerIterator kernel
|
| 144 |
+
);
|
| 145 |
+
|
| 146 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 147 |
+
void updateFallback(
|
| 148 |
+
const Dispatcher& dispatcher,
|
| 149 |
+
DispatchKey dispatch_key
|
| 150 |
+
);
|
| 151 |
+
|
| 152 |
+
// Precondition: Dispatcher::mutex_ is held
|
| 153 |
+
void updateSchemaAliasAnalysis(AliasAnalysisKind a) {
|
| 154 |
+
TORCH_INTERNAL_ASSERT(schema_.has_value());
|
| 155 |
+
schema_->schema.setAliasAnalysis(a);
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
std::string dumpComputedTable() const;
|
| 159 |
+
std::string dumpState() const;
|
| 160 |
+
void checkInvariants() const;
|
| 161 |
+
|
| 162 |
+
const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; }
|
| 163 |
+
|
| 164 |
+
// Asserts that the given FuncType is correct for calling this operator in an unboxed way.
|
| 165 |
+
template<class FuncType>
|
| 166 |
+
inline void assertSignatureIsCorrect() {
|
| 167 |
+
assertSignatureIsCorrect(CppSignature::make<FuncType>(), fn_has_symint<FuncType>::value);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const;
|
| 171 |
+
|
| 172 |
+
[[noreturn]] void reportError(DispatchKey dispatchKey) const;
|
| 173 |
+
|
| 174 |
+
const KernelFunction& lookup(DispatchKeySet ks) const {
|
| 175 |
+
const auto idx = ks.getDispatchTableIndexForDispatchKeySet();
|
| 176 |
+
if (C10_UNLIKELY(idx == -1)) {
|
| 177 |
+
reportError(ks.highestPriorityTypeId());
|
| 178 |
+
}
|
| 179 |
+
const auto& kernel = dispatchTable_[idx];
|
| 180 |
+
// A valid kernel *always* has a boxed kernel and *may* have an
|
| 181 |
+
// unboxed kernel. However, we typically do unboxed calls in at::
|
| 182 |
+
// APIs, where the kernel 1) will very likely be valid and 2)
|
| 183 |
+
// should have an unboxed kernel. Checking the unboxed kernel
|
| 184 |
+
// first will allow us to avoid touching the boxed kernel at all
|
| 185 |
+
// in the common case.
|
| 186 |
+
if (C10_UNLIKELY(!kernel.isValidUnboxed())) {
|
| 187 |
+
if (!kernel.isValid()) {
|
| 188 |
+
reportError(ks.highestPriorityTypeId());
|
| 189 |
+
}
|
| 190 |
+
}
|
| 191 |
+
return kernel;
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
std::string listAllDispatchKeys() const;
|
| 195 |
+
|
| 196 |
+
// Returns true if kernel_ has entry for any key in ks.
|
| 197 |
+
//
|
| 198 |
+
// Invariant: There are no alias keys in the passed-in dispatch key set.
|
| 199 |
+
// Note [No Alias Keys in DispatchKeySet]
|
| 200 |
+
// Alias keys should be checked using `hasKernelForDispatchKey`
|
| 201 |
+
// Alias keys shouldn't go inside of a DispatchKeySet, since they can technically
|
| 202 |
+
// have a value > 63 (causing overflow).
|
| 203 |
+
bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const;
|
| 204 |
+
// Returns true if kernel_ has entry for a particular key.
|
| 205 |
+
bool hasKernelForDispatchKey(DispatchKey k) const;
|
| 206 |
+
// Retrieves the kernel entry at a particular key. Symmetric with
|
| 207 |
+
// hasKernelForDispatchKey. To get the AnnotatedKernel, see
|
| 208 |
+
// getKernelForDispatchKey (private)
|
| 209 |
+
const KernelFunction& kernelForDispatchKey(DispatchKey k) const;
|
| 210 |
+
// Returns true if the "computed table" has an entry for a particular key.
|
| 211 |
+
bool hasComputedKernelForDispatchKey(DispatchKey k) const;
|
| 212 |
+
// Returns all the operator tags added at the time of registration
|
| 213 |
+
const std::vector<at::Tag>& getTags() const;
|
| 214 |
+
void setReportErrorCallback_(std::unique_ptr<c10::SafePyObject> callback);
|
| 215 |
+
|
| 216 |
+
// Returns the PyObject for this operator (the torch.ops.ns.op.overload object,
// per the py_cache_ member comment) for the given interpreter. Served via
// py_cache_.ptr_or, which consults `slow_accessor` when it has no cached
// pointer — presumably F produces the object on a cache miss; confirm against
// c10::PyHandleCache.
template <typename F>
PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const {
  return py_cache_.ptr_or(self_interpreter, slow_accessor);
}
|
| 220 |
+
|
| 221 |
+
private:
|
| 222 |
+
|
| 223 |
+
OperatorName name_;
|
| 224 |
+
std::optional<AnnotatedSchema> schema_;
|
| 225 |
+
#ifndef C10_MOBILE
|
| 226 |
+
std::vector<at::Tag> tags_;
|
| 227 |
+
#endif
|
| 228 |
+
std::array<KernelFunction, c10::num_runtime_entries> dispatchTable_;
|
| 229 |
+
DispatchKeyExtractor dispatchKeyExtractor_;
|
| 230 |
+
// Pointer to the torch.ops.ns.op.overload object for speed
|
| 231 |
+
c10::PyHandleCache py_cache_;
|
| 232 |
+
|
| 233 |
+
// kernels_ stores all registered kernels for the corresponding dispatch key
|
| 234 |
+
// and catchAllKernels_ stores the catch-all kernels.
|
| 235 |
+
// If an operator library gets loaded that overwrites an already existing kernel,
|
| 236 |
+
// both kernels will be in that list but only the newer one will be in
|
| 237 |
+
// dispatchTable. If any of the kernels go away (say the library gets
|
| 238 |
+
// unloaded), we remove the kernel from this list and update the
|
| 239 |
+
// dispatchTable if necessary.
|
| 240 |
+
// Kernels in the list are ordered by registration time descendingly,
|
| 241 |
+
// newer registrations are before older registrations.
|
| 242 |
+
// We do not combine dispatchTable and kernels into one hash map because
|
| 243 |
+
// kernels is a larger data structure and accessed quite infrequently
|
| 244 |
+
// while dispatchTable is accessed often and should be kept small to fit
|
| 245 |
+
// into CPU caches.
|
| 246 |
+
// Invariants:
|
| 247 |
+
// - dispatchTable[dispatch_key] == kernels_[dispatch_key].front()
|
| 248 |
+
// - dispatchTable[dispatch_key] does not exist if and only if
|
| 249 |
+
// kernels_[dispatch_key] does not exist
|
| 250 |
+
// - If kernels_[dispatch_key] exists, then it has elements.
|
| 251 |
+
// It is never an empty list.
|
| 252 |
+
//
|
| 253 |
+
// Why do we do that?
|
| 254 |
+
// -----
|
| 255 |
+
// We mostly do this to enable Jupyter notebooks where a cell registering
|
| 256 |
+
// a kernel could be executed multiple times and the later execution
|
| 257 |
+
// should overwrite the earlier one. Note that this still fails when the
|
| 258 |
+
// function schema changed between the executions, but it works as long
|
| 259 |
+
// as the function schema didn't change. A better solution would be to
|
| 260 |
+
// unload the old extension library from the Jupyter cell when the cell is
|
| 261 |
+
// re-executed and then only allow one kernel here, i.e. error if a kernel
|
| 262 |
+
// is already registered, but that's a lot of effort to implement and
|
| 263 |
+
// currently not high-pri.
|
| 264 |
+
ska::flat_hash_map<DispatchKey,
|
| 265 |
+
#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
|
| 266 |
+
// On mobile, we needn't worry about Jupyter notebooks.
|
| 267 |
+
std::array<AnnotatedKernel, 1>
|
| 268 |
+
#else
|
| 269 |
+
std::list<AnnotatedKernel>
|
| 270 |
+
#endif
|
| 271 |
+
> kernels_;
|
| 272 |
+
|
| 273 |
+
const AnnotatedKernel& missingKernel() const;
|
| 274 |
+
const AnnotatedKernel& ambiguousAutogradOtherKernel() const;
|
| 275 |
+
|
| 276 |
+
// cpp_signature_ stores function signature if any of
|
| 277 |
+
// the kernels was created in a way that allowed us to know the function
|
| 278 |
+
// signature (i.e. by supplying an unboxed C++ kernel function).
|
| 279 |
+
// If this is set, it will be used to check that future kernel
|
| 280 |
+
// registrations match and it will be used in unboxed function calls
|
| 281 |
+
// to verify their arguments against the known function signature.
|
| 282 |
+
struct CppSignatureWithDebug {
|
| 283 |
+
CppSignature signature;
|
| 284 |
+
std::string debug;
|
| 285 |
+
std::optional<DispatchKey> dispatch_key;
|
| 286 |
+
};
|
| 287 |
+
std::optional<CppSignatureWithDebug> cpp_signature_;
|
| 288 |
+
std::optional<CppSignatureWithDebug> sym_cpp_signature_;
|
| 289 |
+
|
| 290 |
+
// A Python custom error handler for OperatorEntry::reportError
|
| 291 |
+
std::unique_ptr<c10::SafePyObject> report_error_callback_;
|
| 292 |
+
|
| 293 |
+
// Whether this operator needs to be observed with RecordFunction
|
| 294 |
+
const bool is_observed_;
|
| 295 |
+
|
| 296 |
+
[[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const;
|
| 297 |
+
const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const;
|
| 298 |
+
std::pair<const AnnotatedKernel&, const char*> computeDispatchTableEntryWithDebug(
|
| 299 |
+
const c10::Dispatcher& dispatcher, DispatchKey dispatch_key
|
| 300 |
+
) const;
|
| 301 |
+
// This function re-establishes the invariant that dispatchTable
|
| 302 |
+
// contains the front element from the kernels list for a given runtime dispatch key.
|
| 303 |
+
void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
|
| 304 |
+
// Like above, but also handles alias dispatch keys.
|
| 305 |
+
void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key);
|
| 306 |
+
// Like above, but for ALL entries in the dispatch table.
|
| 307 |
+
void updateDispatchTableFull_(const c10::Dispatcher& dispatcher);
|
| 308 |
+
// Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front().
|
| 309 |
+
const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const;
|
| 310 |
+
};
|
| 311 |
+
|
| 312 |
+
} // namespace impl
|
| 313 |
+
} // namespace c10
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cstdint>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
// Alias-analysis strategies that can be selected for an operator.
enum class AliasAnalysisKind : uint8_t {
  INTERNAL_SPECIAL_CASE,
  CONSERVATIVE, // The most conservative alias analysis type, assumes
                // side-effects. This is the default analysis.
  FROM_SCHEMA,
  PURE_FUNCTION
};

// Human-readable name of an AliasAnalysisKind; "UNKNOWN" for any value
// outside the declared enumerators.
#if !defined(_MSC_VER)
constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr.
#endif
inline const char* toString(AliasAnalysisKind aliasAnalysisKind) {
  switch (aliasAnalysisKind) {
    case AliasAnalysisKind::CONSERVATIVE:
      return "CONSERVATIVE";
    case AliasAnalysisKind::FROM_SCHEMA:
      return "FROM_SCHEMA";
    case AliasAnalysisKind::PURE_FUNCTION:
      return "PURE_FUNCTION";
    case AliasAnalysisKind::INTERNAL_SPECIAL_CASE:
      return "INTERNAL_SPECIAL_CASE";
    default:
      return "UNKNOWN";
  }
}
|
| 29 |
+
|
| 30 |
+
} // namespace c10
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <functional>
|
| 4 |
+
|
| 5 |
+
namespace c10 {
|
| 6 |
+
|
| 7 |
+
// RAII handle for a registration: runs the supplied callback exactly once,
// when the (possibly moved-to) owning handle is destroyed.
class RegistrationHandleRAII final {
public:
  explicit RegistrationHandleRAII(std::function<void()> onDestruction)
      : onDestruction_(std::move(onDestruction)) {}

  // Not copyable: the callback must fire exactly once.
  RegistrationHandleRAII(const RegistrationHandleRAII&) = delete;
  RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete;

  // Movable: ownership of the callback transfers; the moved-from handle
  // becomes inert.
  RegistrationHandleRAII(RegistrationHandleRAII&& other) noexcept
      : onDestruction_(std::move(other.onDestruction_)) {
    other.onDestruction_ = nullptr;
  }

  // Move assignment replaces this handle's callback with the source's.
  // NOTE: the callback previously held by *this is dropped without being
  // invoked (same behavior as the original implementation).
  RegistrationHandleRAII& operator=(RegistrationHandleRAII&& other) noexcept {
    onDestruction_ = std::move(other.onDestruction_);
    other.onDestruction_ = nullptr;
    return *this;
  }

  ~RegistrationHandleRAII() {
    if (!onDestruction_) {
      return;  // moved-from or already emptied: nothing to do
    }
    onDestruction_();
  }

private:
  std::function<void()> onDestruction_;
};
|
| 35 |
+
|
| 36 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Tensor.h>
|
| 4 |
+
#include <ATen/TensorUtils.h>
|
| 5 |
+
#include <ATen/core/List.h>
|
| 6 |
+
#include <c10/core/TensorOptions.h>
|
| 7 |
+
|
| 8 |
+
/*
|
| 9 |
+
* [Note: hacky wrapper removal for optional tensor]
|
| 10 |
+
*
|
| 11 |
+
* The kernel implementation takes an optional tensor marked in the schema as
|
| 12 |
+
* Tensor? but the C++ function takes Tensor instead of the optional<Tensor>
|
| 13 |
+
* expected by the dispatcher.
|
| 14 |
+
*
|
| 15 |
+
* To remove the hacky wrapper, the C++ function is changed to take
|
| 16 |
+
* optional<Tensor> and unwrap the Tensor value at the beginning of
|
| 17 |
+
* the function, e.g.:
|
| 18 |
+
* > c10::MaybeOwned<Tensor> weight_maybe_owned =
|
| 19 |
+
* > at::borrow_from_optional_tensor(weight_opt);
|
| 20 |
+
* > const Tensor& weight = *weight_maybe_owned;
|
| 21 |
+
*
|
| 22 |
+
* We may want to make the kernel handle optional directly without
|
| 23 |
+
* going through the creation of a default-constructed Tensor in
|
| 24 |
+
* at::borrow_from_optional_tensor.
|
| 25 |
+
*/
|
| 26 |
+
|
| 27 |
+
/*
|
| 28 |
+
* [Note: hacky wrapper removal for TensorOptions]
|
| 29 |
+
*
|
| 30 |
+
* The kernel implementation takes a TensorOptions argument but the dispatcher
|
| 31 |
+
* expects separate arguments for dtype, layout, device, pin_memory.
|
| 32 |
+
*
|
| 33 |
+
* To remove the hacky wrapper, the kernel implementation is changed to take
|
| 34 |
+
* the 4 arguments (dtype, layout, device, pin_memory), and assemble the
|
| 35 |
+
* TensorOptions value at the beginning of the function, e.g.:
|
| 36 |
+
* > TensorOptions options = TensorOptions().dtype(dtype).layout(layout)
|
| 37 |
+
* > .device(device).pinned_memory(pin_memory);
|
| 38 |
+
*
|
| 39 |
+
* We may want make the kernel handle these parameters directly without going
|
| 40 |
+
* through the creation of a TensorOptions value.
|
| 41 |
+
*/
|
| 42 |
+
|
| 43 |
+
namespace c10 {
|
| 44 |
+
namespace impl {
|
| 45 |
+
|
| 46 |
+
TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName);
|
| 47 |
+
|
| 48 |
+
// Accumulates the "common device" shared by all defined tensor arguments of a
// call. The first defined tensor seen records its device into `common_device`;
// any later tensor on a different device triggers common_device_check_failure
// (which formats the error using methodName/argName). Undefined tensors are
// ignored.
inline void check_and_update_common_device(optional<Device>& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
  // TODO: Remove this once the following issue is addressed:
  // https://github.com/pytorch/pytorch/issues/57380
  if (!tensor.defined()) {
    return;
  }

  if (!common_device.has_value()) {
    common_device = tensor.device();
    return;
  }

  if (C10_UNLIKELY(common_device != tensor.device())) {
    common_device_check_failure(*common_device, tensor, methodName, argName);
  }
}
|
| 64 |
+
|
| 65 |
+
// Overload for an optional tensor: checks the tensor only when present.
inline void check_and_update_common_device(optional<Device>& common_device, const optional<at::Tensor>& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
  if (tensor.has_value()) {
    check_and_update_common_device(common_device, tensor.value(), methodName, argName);
  }
}

// Overload for a tensor list: checks every tensor in the list.
inline void check_and_update_common_device(optional<Device>& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
  for (const auto& tensor : tensors) {
    check_and_update_common_device(common_device, tensor, methodName, argName);
  }
}

// Overload for a list of optional tensors: checks every present tensor
// (delegates element-wise to the optional-tensor overload).
inline void check_and_update_common_device(optional<Device>& common_device, const List<optional<at::Tensor>>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
  for (const auto& tensor : tensors) {
    check_and_update_common_device(common_device, tensor, methodName, argName);
  }
}
|
| 82 |
+
} // namespace impl
|
| 83 |
+
} // namespace c10
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/**
|
| 4 |
+
* This file contains functionality to take a C++ function and infer its
|
| 5 |
+
* c10::FunctionSchema.
|
| 6 |
+
*/
|
| 7 |
+
|
| 8 |
+
#include <ATen/core/function_schema.h>
|
| 9 |
+
#include <c10/util/Metaprogramming.h>
|
| 10 |
+
|
| 11 |
+
namespace c10 {
|
| 12 |
+
namespace detail {
|
| 13 |
+
|
| 14 |
+
namespace infer_schema {
|
| 15 |
+
|
| 16 |
+
/// The templated inference code creates `ArgumentDef` instead of `Argument`,
/// because that can be constructed at compile time and has a much smaller
/// binary size than having calls to `Argument` constructors in the template.
/// Creating `Argument` objects from `ArgumentDef` can then be done at
/// runtime in a non-templated way.
struct ArgumentDef final {
  using GetTypeFn = TypePtr();
  // Type-producing callbacks; populated from getTypePtrCopy /
  // getFakeTypePtrCopy in createArgumentVectorFromTypes below.
  GetTypeFn* getTypeFn;
  GetTypeFn* getFakeTypeFn;
  // Default: both callbacks null (no type information attached yet).
  constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {}
  explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {}
};
|
| 28 |
+
|
| 29 |
+
template<bool V>
|
| 30 |
+
struct bool_t {};
|
| 31 |
+
template<> struct bool_t<true> : std::true_type {};
|
| 32 |
+
template<> struct bool_t<false> : std::false_type {};
|
| 33 |
+
|
| 34 |
+
/// Checks the static C++ types `Types` for correctness to catch common error cases.
|
| 35 |
+
template <class... Types>
|
| 36 |
+
constexpr int checkStaticTypes() {
|
| 37 |
+
// Give nice error messages for some of the common error cases.
|
| 38 |
+
// Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT
|
| 39 |
+
static_assert(std::conjunction<
|
| 40 |
+
bool_t<!std::is_integral<Types>::value || std::is_same<Types, int8_t>::value || std::is_same<Types, int64_t>::value || std::is_same<Types, bool>::value>...
|
| 41 |
+
>::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type");
|
| 42 |
+
static_assert(std::conjunction<
|
| 43 |
+
bool_t<!std::is_same<Types, float>::value>...
|
| 44 |
+
>::value, "INVALID TYPE: float is not supported as an argument type, use double instead");
|
| 45 |
+
return 0;
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
// Builds a compile-time std::array of ArgumentDef, one entry per type in Ts,
// each carrying the type callbacks for the decayed type. The comma operator
// forces the static type checks to run before the array is materialized.
template <typename... Ts, size_t... Is>
constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
  return (
    // Check types for common errors
    checkStaticTypes<Ts...>(),

    // Create the return value
    std::array<ArgumentDef, sizeof...(Ts)>{
      ArgumentDef(&getTypePtrCopy<std::decay_t<Ts>>, &getFakeTypePtrCopy<std::decay_t<Ts>>)...}
  );
}
|
| 59 |
+
|
| 60 |
+
/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
/// as template arguments.
// Only the typelist specialization below is usable; the primary template is
// intentionally left empty so misuse fails to compile.
template<class ParameterTypes> struct createArguments final {};
template<class... ParameterTypes>
struct createArguments<guts::typelist::typelist<ParameterTypes...>> final {
  static constexpr std::array<ArgumentDef, sizeof...(ParameterTypes)> call() {
    return createArgumentVectorFromTypes<ParameterTypes...>(
      std::make_index_sequence<sizeof...(ParameterTypes)>()
    );
  }
};
|
| 71 |
+
|
| 72 |
+
/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
/// as a tuple (i.e. in the way c10 kernels return values).
/// It can be a tuple<A, B, C> if there's three output arguments with types A, B, C.
/// It can be an empty tuple<>, or void for kernels that don't return anything.
/// It can be a single type A (i.e. no tuple) for the case where a kernel just
/// returns one value.
template<class ReturnTypeTuple, class Enable = void> struct createReturns final {};

// tuple<A, B, ...>: one ArgumentDef per tuple element.
template<class... ReturnTypes>
struct createReturns<std::tuple<ReturnTypes...>, void> final {
  static constexpr std::array<ArgumentDef, sizeof...(ReturnTypes)> call() {
    return createArgumentVectorFromTypes<ReturnTypes...>(
      std::make_index_sequence<sizeof...(ReturnTypes)>()
    );
  }
};

// Single non-void, non-tuple return type: treated like tuple<ReturnType>.
template<class ReturnType>
struct createReturns<ReturnType, std::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
  static constexpr std::array<ArgumentDef, 1> call() {
    return createReturns<std::tuple<ReturnType>>::call();
  }
};

// void return: no return entries (same as an empty tuple<>).
template<>
struct createReturns<void, void> final {
  static constexpr std::array<ArgumentDef, 0> call() {
    return createReturns<std::tuple<>>::call();
  }
};
|
| 102 |
+
|
| 103 |
+
// Always produces exactly one ArgumentDef for ReturnType. Unlike
// createReturns, a std::tuple return is kept as a single entry rather than
// flattened (see createFunctionSchemaFromTraitsSingleReturn below).
template <typename ReturnType>
struct createSingleReturn {
  static constexpr std::array<ArgumentDef, 1> call() {
    return createArgumentVectorFromTypes<ReturnType>(std::make_index_sequence<1>());
  }
};
|
| 109 |
+
|
| 110 |
+
TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
| 111 |
+
TORCH_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
|
| 112 |
+
|
| 113 |
+
/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
/// function. Flattens std::tuple returns into multiple return types
// (uses the name-less make_function_schema overload declared above).
template <typename FunctionTraits>
FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() {
  using ReturnType = typename FunctionTraits::return_type;
  using ParameterTypes = typename FunctionTraits::parameter_types;

  // arguments and returns are computed into a std::array at compile time and embedded into the binary.
  // The only code executed at runtime here is the one that creates a std::vector
  // of the arguments/returns from the std::array.
  constexpr auto arguments = createArguments<ParameterTypes>::call();
  constexpr auto returns = createReturns<ReturnType>::call();

  return make_function_schema(arguments, returns);
}
|
| 128 |
+
|
| 129 |
+
/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
/// function. Preserves std::tuple returns as a Tuple return type
// (schema is built under the given name/overload_name via the named
// make_function_schema overload declared above).
template <typename FunctionTraits>
FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) {
  using ReturnType = typename FunctionTraits::return_type;
  using ParameterTypes = typename FunctionTraits::parameter_types;

  // arguments and returns are computed into a std::array at compile time and embedded into the binary.
  // The only code executed at runtime here is the one that creates a std::vector
  // of the arguments/returns from the std::array.
  constexpr auto arguments = createArguments<ParameterTypes>::call();
  constexpr auto returns = createSingleReturn<ReturnType>::call();

  return make_function_schema(std::move(name), std::move(overload_name), arguments, returns);
}
|
| 144 |
+
|
| 145 |
+
}
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
// Infers the FunctionSchema of FuncType (via guts::infer_function_traits_t),
// flattening a std::tuple return into multiple returns. No name/overload name
// is supplied.
template<class FuncType>
FunctionSchema inferFunctionSchemaFlattenedReturns() {
  return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns<guts::infer_function_traits_t<FuncType>>();
}
|
| 152 |
+
|
| 153 |
+
// Infers the FunctionSchema of FuncType under the given name/overload name,
// keeping a std::tuple return as a single Tuple return.
template<class FuncType>
FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) {
  return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn<guts::infer_function_traits_t<FuncType>>(std::move(name), std::move(overload_name));
}
|
| 157 |
+
|
| 158 |
+
TORCH_API std::optional<std::string> findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified);
|
| 159 |
+
|
| 160 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// TODO: unify to C10_MOBILE. In theory this header could be used in OSS.
|
| 4 |
+
#ifdef TEMPLATE_SELECTIVE_BUILD
|
| 5 |
+
#include <ATen/selected_mobile_ops.h>
|
| 6 |
+
#endif
|
| 7 |
+
|
| 8 |
+
/**
|
| 9 |
+
* This header implements functionality to build PyTorch with only a certain
|
| 10 |
+
* set of operators (+ dependencies) included.
|
| 11 |
+
*
|
| 12 |
+
* - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these
|
| 13 |
+
* two ops will be included in your build. The allowlist records operators
|
| 14 |
+
* only, no overloads; if you include aten::add, all overloads of aten::add
|
| 15 |
+
* will be included.
|
| 16 |
+
*
|
| 17 |
+
* Internally, this is done by removing the operator registration calls
|
| 18 |
+
* using compile time programming, and the linker will then prune all
|
| 19 |
+
* operator functions that weren't registered.
|
| 20 |
+
* See Note [Selective build] for more details
|
| 21 |
+
*
|
| 22 |
+
* WARNING: The allowlist mechanism doesn't work for all ways you could go about
|
| 23 |
+
* registering an operator. If the dispatch key / operator name is not
|
| 24 |
+
* sufficiently obvious at compile time, then the allowlisting mechanism
|
| 25 |
+
* will fail (and the operator will be included in the binary anyway).
|
| 26 |
+
*/
|
| 27 |
+
|
| 28 |
+
#include <c10/util/string_view.h>
|
| 29 |
+
#include <c10/core/DispatchKey.h>
|
| 30 |
+
#include <c10/macros/Macros.h>
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
#if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
|
| 34 |
+
#include <ATen/record_function.h>
|
| 35 |
+
#endif
|
| 36 |
+
|
| 37 |
+
namespace c10 {
|
| 38 |
+
|
| 39 |
+
namespace impl {
|
| 40 |
+
|
| 41 |
+
constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare
|
| 42 |
+
|
| 43 |
+
/**
|
| 44 |
+
* In selective build mode returns true/false depending on whether a build
|
| 45 |
+
* feature is available or not.
|
| 46 |
+
*
|
| 47 |
+
* In instrumenting mode (tracing mode), always returns true, and doesn't
|
| 48 |
+
* trigger any side effects.
|
| 49 |
+
*/
|
| 50 |
+
// Availability check backing the BUILD_FEATURE_AVAILABLE macro below.
constexpr bool is_build_feature_available(const char* name) {
#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
  // Selective Build mode.
#if !defined(TORCH_BUILD_FEATURE_ALLOWLIST)
  // No feature allowlist configured: every build feature is available.
  (void)name;
  return true;
#else
  // Allowlist configured: the feature is available iff listed in the macro.
  return allowlist_contains(
    C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST),
    name);
#endif

#else
  // Instrumenting mode.
  (void)name;
  return true;
#endif
}
|
| 68 |
+
|
| 69 |
+
[[noreturn]] void build_feature_required_feature_not_available(const char* feature);
|
| 70 |
+
|
| 71 |
+
/**
|
| 72 |
+
* Use BUILD_FEATURE_REQUIRED macro in user-code.
|
| 73 |
+
*
|
| 74 |
+
* In selective build mode becomes a no-op if the build feature passed
|
| 75 |
+
* in is available. If not available, throws an exception (c10::Error).
|
| 76 |
+
* The compiler is able to perform dead code elimination for code
|
| 77 |
+
* following this method if the build feature is not available.
|
| 78 |
+
*
|
| 79 |
+
* In instrumenting mode (tracing mode), registers (as a side effect)
|
| 80 |
+
* the presence of this specific build feature being triggered.
|
| 81 |
+
*/
|
| 82 |
+
#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode
|
| 83 |
+
|
| 84 |
+
#if defined(TORCH_BUILD_FEATURE_ALLOWLIST)
|
| 85 |
+
#define BUILD_FEATURE_REQUIRED(NAME) \
|
| 86 |
+
if (!c10::impl::is_build_feature_available(NAME)) { \
|
| 87 |
+
::c10::impl::build_feature_required_feature_not_available(NAME); \
|
| 88 |
+
}
|
| 89 |
+
#else // Everything trivially selected
|
| 90 |
+
#define BUILD_FEATURE_REQUIRED(NAME)
|
| 91 |
+
|
| 92 |
+
#endif
|
| 93 |
+
|
| 94 |
+
#else // trace mode
|
| 95 |
+
#define BUILD_FEATURE_REQUIRED(NAME) \
|
| 96 |
+
RECORD_FUNCTION_WITH_SCOPE( \
|
| 97 |
+
at::RecordScope::BUILD_FEATURE, \
|
| 98 |
+
std::string(NAME), \
|
| 99 |
+
{});
|
| 100 |
+
#endif
|
| 101 |
+
|
| 102 |
+
// Use this macro, and not is_build_feature_available
|
| 103 |
+
#define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME)
|
| 104 |
+
|
| 105 |
+
// returns true iff allowlist contains item
// allowlist_contains("a;bc;d", "bc") == true
//
// `allowlist` is a ';'-separated list; each element is compared to `item`
// exactly (no substring match). constexpr so it can run at compile time on
// macro-provided allowlists.
constexpr bool allowlist_contains(string_view allowlist, string_view item) {
  //Choose a really big value for next so that if something goes wrong
  //this code will blow up in a hopefully detectable way.
  size_t next = std::numeric_limits<size_t>::max();
  for (size_t cur = 0; cur <= allowlist.size(); cur = next) {
    next = allowlist.find(';', cur);
    if (next != string_view::npos) {
      if (allowlist.substr(cur, next - cur).compare(item) == 0) {
        return true;
      }
      next++;  // step past the ';' so the next iteration starts on the next element
    } else {
      // No further ';': compare the final element, then stop.
      if (allowlist.substr(cur).compare(item) == 0) {
        return true;
      }
      break;
    }
  }
  return false;
}
|
| 127 |
+
|
| 128 |
+
// Returns true iff the given op name is on the allowlist
// and should be registered
//
// `op_name` must be namespaced (contain "::") and must not carry a schema
// signature (no '(') — both enforced with assert below.
constexpr bool op_allowlist_check(string_view op_name) {
  assert(op_name.find("::") != string_view::npos);
  // Use assert() instead of throw() due to a gcc bug. See:
  // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
  // https://github.com/fmtlib/fmt/issues/682
  assert(op_name.find("(") == string_view::npos);
#if !defined(TORCH_OPERATOR_WHITELIST)
  // If the TORCH_OPERATOR_WHITELIST parameter is not defined,
  // all ops are to be registered
  return true;
#else
  return allowlist_contains(
    C10_STRINGIZE(TORCH_OPERATOR_WHITELIST),
    // This function is majorly used for mobile selective build with
    // root operators, where the overload is included in the allowlist.
    op_name);
  // // Strip overload name (as allowlist doesn't contain overloads)
  // // Another function based on this may be added when there's usage
  // // on op names without overload.
  // OperatorNameView::parse(op_name).name);
#endif
}
|
| 152 |
+
|
| 153 |
+
// Returns true iff the given schema string is on the allowlist
// and should be registered
constexpr bool schema_allowlist_check(string_view schema) {
#if defined(TORCH_FORCE_SCHEMA_REGISTRATION)
  return true;
#else
  // Strip the signature: only the name portion before '(' is matched.
  return op_allowlist_check(schema.substr(0, schema.find("(")));
#endif
}
|
| 162 |
+
|
| 163 |
+
// Returns true iff the given custom class name is on the allowlist
|
| 164 |
+
// and should be registered
|
| 165 |
+
constexpr bool custom_class_allowlist_check(string_view custom_class_name) {
|
| 166 |
+
#if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST)
|
| 167 |
+
// If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined,
|
| 168 |
+
// all custom classes are to be registered
|
| 169 |
+
(void)custom_class_name;
|
| 170 |
+
return true;
|
| 171 |
+
#else
|
| 172 |
+
return allowlist_contains(
|
| 173 |
+
C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST),
|
| 174 |
+
custom_class_name);
|
| 175 |
+
#endif
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST.
|
| 179 |
+
// Add this API to pass arbitrary allowlist.
|
| 180 |
+
constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) {
|
| 181 |
+
return allowlist_contains(allowlist, schema.substr(0, schema.find("(")));
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
// Returns true iff the given dispatch key is on the allowlist
|
| 185 |
+
// and should be registered. When we turn this on, the list of valid
|
| 186 |
+
// mobile dispatch keys is hard coded (but you need to make sure
|
| 187 |
+
// that you have the correct set of dispatch keys for this).
|
| 188 |
+
constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) {
|
| 189 |
+
#ifdef C10_MOBILE
|
| 190 |
+
return true;
|
| 191 |
+
// Disabled for now: to be enabled later!
|
| 192 |
+
// return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll;
|
| 193 |
+
#else
|
| 194 |
+
return true;
|
| 195 |
+
#endif
|
| 196 |
+
}
|
| 197 |
+
|
| 198 |
+
} // namespace impl
|
| 199 |
+
} // namespace c10
|
parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h
ADDED
|
@@ -0,0 +1,596 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
/**
|
| 4 |
+
* Include this file if you want to register operators. It includes all
|
| 5 |
+
* functionality needed to do so for you.
|
| 6 |
+
*/
|
| 7 |
+
|
| 8 |
+
#include <c10/core/DispatchKey.h>
|
| 9 |
+
#include <c10/core/DispatchKeySet.h>
|
| 10 |
+
#include <c10/core/CompileTimeFunctionPointer.h>
|
| 11 |
+
#include <ATen/core/boxing/KernelFunction.h>
|
| 12 |
+
#include <ATen/core/dispatch/CppSignature.h>
|
| 13 |
+
#include <ATen/core/dispatch/RegistrationHandleRAII.h>
|
| 14 |
+
#include <ATen/core/op_registration/infer_schema.h>
|
| 15 |
+
#if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD)
|
| 16 |
+
#include <torch/csrc/jit/frontend/function_schema_parser.h>
|
| 17 |
+
#endif
|
| 18 |
+
#include <ATen/core/ATenOpList.h>
|
| 19 |
+
|
| 20 |
+
namespace c10 {
|
| 21 |
+
|
| 22 |
+
namespace detail {
|
| 23 |
+
// The first argument of the schema might be of type DispatchKeySet, in which case we remove it.
|
| 24 |
+
// We do this because every argument in a function schema is expected to be convertable
|
| 25 |
+
// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of.
|
| 26 |
+
// See Note [Plumbing Keys Through The Dispatcher]
|
| 27 |
+
template<class KernelFunctor>
|
| 28 |
+
std::unique_ptr<FunctionSchema> inferFunctionSchemaFromFunctor() {
|
| 29 |
+
using func_type = typename c10::remove_DispatchKeySet_arg_from_func<KernelFunctor>::func_type;
|
| 30 |
+
return std::make_unique<FunctionSchema>(inferFunctionSchemaFlattenedReturns<func_type>());
|
| 31 |
+
}
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
/**
|
| 35 |
+
* An instance of this class handles the registration for one or more operators.
|
| 36 |
+
* Make sure you keep the RegisterOperators instance around since it will
|
| 37 |
+
* deregister the operator it's responsible for in its destructor.
|
| 38 |
+
*
|
| 39 |
+
* Example:
|
| 40 |
+
*
|
| 41 |
+
* > namespace {
|
| 42 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 43 |
+
* > public:
|
| 44 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 45 |
+
* > };
|
| 46 |
+
* > }
|
| 47 |
+
* >
|
| 48 |
+
* > static auto registry = c10::RegisterOperators()
|
| 49 |
+
* > .op(c10::RegisterOperators::options()
|
| 50 |
+
* > .schema("my_op")
|
| 51 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 52 |
+
*/
|
| 53 |
+
class TORCH_API RegisterOperators final {
|
| 54 |
+
public:
|
| 55 |
+
RegisterOperators() = default;
|
| 56 |
+
~RegisterOperators() = default;
|
| 57 |
+
|
| 58 |
+
RegisterOperators(const RegisterOperators&) = delete;
|
| 59 |
+
RegisterOperators& operator=(const RegisterOperators&) = delete;
|
| 60 |
+
RegisterOperators(RegisterOperators&&) noexcept = default;
|
| 61 |
+
RegisterOperators& operator=(RegisterOperators&&) noexcept = default;
|
| 62 |
+
|
| 63 |
+
class TORCH_API Options final {
|
| 64 |
+
public:
|
| 65 |
+
Options(const Options&) = delete;
|
| 66 |
+
Options(Options&&) noexcept = delete;
|
| 67 |
+
Options& operator=(const Options&) = delete;
|
| 68 |
+
Options& operator=(Options&&) noexcept = delete;
|
| 69 |
+
|
| 70 |
+
// internal-only for registering stack based kernels
|
| 71 |
+
template<KernelFunction::BoxedKernelFunction* kernel_func>
|
| 72 |
+
Options&& kernel(DispatchKey dispatch_key) && {
|
| 73 |
+
return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction<kernel_func>(), nullopt, nullptr);
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
// internal-only for registering stack based catch-all kernels
|
| 77 |
+
template<KernelFunction::BoxedKernelFunction* kernel_func>
|
| 78 |
+
Options&& catchAllKernel() && {
|
| 79 |
+
return std::move(*this).kernel(c10::nullopt, KernelFunction::makeFromBoxedFunction<kernel_func>(), nullopt, nullptr);
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
// internal only for registering caffe2 ops
|
| 83 |
+
Options&& schema(FunctionSchema&& schema) {
|
| 84 |
+
TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration.");
|
| 85 |
+
schemaOrName_ = FunctionSchema(std::move(schema));
|
| 86 |
+
return std::move(*this);
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
/**
|
| 90 |
+
* Use this to specify the schema for an operator. You can also specify
|
| 91 |
+
* the operator name only to have the function signature part of the
|
| 92 |
+
* schema be inferred from the kernel function.
|
| 93 |
+
*
|
| 94 |
+
* Example:
|
| 95 |
+
*
|
| 96 |
+
* > // Infer function signature from my_kernel_cpu
|
| 97 |
+
* > static auto registry = c10::RegisterOperators()
|
| 98 |
+
* > .op(c10::RegisterOperators::options()
|
| 99 |
+
* > .schema("my_op")
|
| 100 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 101 |
+
* >
|
| 102 |
+
* >
|
| 103 |
+
* > // Explicitly specify full schema
|
| 104 |
+
* > static auto registry = c10::RegisterOperators()
|
| 105 |
+
* > .op(c10::RegisterOperators::options()
|
| 106 |
+
* > .schema("my_op(Tensor a) -> Tensor")
|
| 107 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 108 |
+
*/
|
| 109 |
+
Options&& schema(const std::string& schemaOrName) {
|
| 110 |
+
TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration.");
|
| 111 |
+
|
| 112 |
+
#if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD)
|
| 113 |
+
throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build.");
|
| 114 |
+
#else
|
| 115 |
+
schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName);
|
| 116 |
+
#endif
|
| 117 |
+
|
| 118 |
+
return std::move(*this);
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
/**
|
| 122 |
+
* Use this to register an operator whose kernel is implemented as a functor.
|
| 123 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 124 |
+
* You can register multiple kernels for different dispatch keys.
|
| 125 |
+
*
|
| 126 |
+
* Example:
|
| 127 |
+
*
|
| 128 |
+
* > namespace {
|
| 129 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 130 |
+
* > public:
|
| 131 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 132 |
+
* > };
|
| 133 |
+
* > }
|
| 134 |
+
* >
|
| 135 |
+
* > static auto registry = c10::RegisterOperators()
|
| 136 |
+
* > .op(c10::RegisterOperators::options()
|
| 137 |
+
* > .schema("my_op")
|
| 138 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU));
|
| 139 |
+
*
|
| 140 |
+
* The functor constructor can take arguments to configure the kernel.
|
| 141 |
+
* The arguments are defined in the kernel registration.
|
| 142 |
+
* Example:
|
| 143 |
+
*
|
| 144 |
+
* > namespace {
|
| 145 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 146 |
+
* > public:
|
| 147 |
+
* > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
|
| 148 |
+
* > : ... {...}
|
| 149 |
+
* >
|
| 150 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 151 |
+
* > };
|
| 152 |
+
* > }
|
| 153 |
+
* >
|
| 154 |
+
* > static auto registry = c10::RegisterOperators()
|
| 155 |
+
* > .op(c10::RegisterOperators::options()
|
| 156 |
+
* > .schema("my_op")
|
| 157 |
+
* > .kernel<my_kernel_cpu>(DispatchKey::CPU, "some_configuration", 3, true));
|
| 158 |
+
*/
|
| 159 |
+
template<class KernelFunctor, class... ConstructorParameters>
|
| 160 |
+
// enable_if: only enable it if KernelFunctor is actually a functor
|
| 161 |
+
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && {
|
| 162 |
+
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
|
| 163 |
+
static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
|
| 164 |
+
|
| 165 |
+
return std::move(*this).kernel(
|
| 166 |
+
dispatch_key,
|
| 167 |
+
KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
|
| 168 |
+
impl::CppSignature::make<KernelFunctor>(),
|
| 169 |
+
detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
|
| 170 |
+
);
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
/**
|
| 174 |
+
* Use this to register an operator whose kernel is implemented as a functor.
|
| 175 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 176 |
+
* the input. Dispatch is disabled for this operator.
|
| 177 |
+
*
|
| 178 |
+
* Example:
|
| 179 |
+
*
|
| 180 |
+
* > namespace {
|
| 181 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 182 |
+
* > public:
|
| 183 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 184 |
+
* > };
|
| 185 |
+
* > }
|
| 186 |
+
* >
|
| 187 |
+
* > static auto registry = c10::RegisterOperators()
|
| 188 |
+
* > .op(c10::RegisterOperators::options()
|
| 189 |
+
* > .schema("my_op")
|
| 190 |
+
* > .catchAllKernel<my_kernel_cpu>());
|
| 191 |
+
*
|
| 192 |
+
* The functor constructor can take arguments to configure the kernel.
|
| 193 |
+
* The arguments are defined in the kernel registration.
|
| 194 |
+
* Example:
|
| 195 |
+
*
|
| 196 |
+
* > namespace {
|
| 197 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 198 |
+
* > public:
|
| 199 |
+
* > explicit my_kernel_cpu(std::string some_configuration, int a, bool b)
|
| 200 |
+
* > : ... {...}
|
| 201 |
+
* >
|
| 202 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 203 |
+
* > };
|
| 204 |
+
* > }
|
| 205 |
+
* >
|
| 206 |
+
* > static auto registry = c10::RegisterOperators()
|
| 207 |
+
* > .op(c10::RegisterOperators::options()
|
| 208 |
+
* > .schema("my_op")
|
| 209 |
+
* > .catchAllKernel<my_kernel_cpu>("some_configuration", 3, true));
|
| 210 |
+
*/
|
| 211 |
+
template<class KernelFunctor, class... ConstructorParameters>
|
| 212 |
+
// enable_if: only enable it if KernelFunctor is actually a functor
|
| 213 |
+
std::enable_if_t<guts::is_functor<KernelFunctor>::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && {
|
| 214 |
+
static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to register a kernel functor using the kernel<Functor>() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
|
| 215 |
+
static_assert(std::is_constructible<KernelFunctor, ConstructorParameters...>::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel<Functor>(arguments...) must match one of the constructors of Functor.");
|
| 216 |
+
|
| 217 |
+
return std::move(*this).kernel(
|
| 218 |
+
c10::nullopt,
|
| 219 |
+
KernelFunction::makeFromUnboxedFunctor<false, KernelFunctor>(std::make_unique<KernelFunctor>(std::forward<ConstructorParameters>(constructorParameters)...)),
|
| 220 |
+
impl::CppSignature::make<KernelFunctor>(),
|
| 221 |
+
detail::inferFunctionSchemaFromFunctor<KernelFunctor>()
|
| 222 |
+
);
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
/**
|
| 226 |
+
* Use this to register an operator whose kernel is implemented by a function.
|
| 227 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 228 |
+
* You can register multiple kernels for different dispatch keys.
|
| 229 |
+
*
|
| 230 |
+
* Example:
|
| 231 |
+
*
|
| 232 |
+
* > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
|
| 233 |
+
* >
|
| 234 |
+
* > static auto registry = c10::RegisterOperators()
|
| 235 |
+
* > .op(c10::RegisterOperators::options()
|
| 236 |
+
* > .schema("my_op")
|
| 237 |
+
* > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>(DispatchKey::CPU));
|
| 238 |
+
*/
|
| 239 |
+
template<class FuncType, FuncType* kernel_func>
|
| 240 |
+
// enable_if: only enable it if FuncType is actually a function
|
| 241 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key) && {
|
| 242 |
+
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
|
| 243 |
+
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
|
| 244 |
+
|
| 245 |
+
return std::move(*this).kernel(
|
| 246 |
+
dispatch_key,
|
| 247 |
+
KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
|
| 248 |
+
impl::CppSignature::make<FuncType>(),
|
| 249 |
+
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
|
| 250 |
+
detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
|
| 251 |
+
);
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
/**
|
| 255 |
+
* Use this to register an operator whose kernel is implemented by a function.
|
| 256 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 257 |
+
* the input. Dispatch is disabled for this operator.
|
| 258 |
+
*
|
| 259 |
+
* Example:
|
| 260 |
+
*
|
| 261 |
+
* > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
|
| 262 |
+
* >
|
| 263 |
+
* > static auto registry = c10::RegisterOperators()
|
| 264 |
+
* > .op(c10::RegisterOperators::options()
|
| 265 |
+
* > .schema("my_op")
|
| 266 |
+
* > .catchAllKernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
|
| 267 |
+
*/
|
| 268 |
+
template<class FuncType, FuncType* kernel_func>
|
| 269 |
+
// enable_if: only enable it if FuncType is actually a function
|
| 270 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel() && {
|
| 271 |
+
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
|
| 272 |
+
static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr");
|
| 273 |
+
|
| 274 |
+
return std::move(*this).kernel(
|
| 275 |
+
c10::nullopt,
|
| 276 |
+
KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)),
|
| 277 |
+
impl::CppSignature::make<FuncType>(),
|
| 278 |
+
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
|
| 279 |
+
detail::inferFunctionSchemaFromFunctor<typename impl::WrapFunctionIntoFunctor<CompileTimeFunctionPointer<FuncType, kernel_func>>::type>()
|
| 280 |
+
);
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
template<class FuncType>
|
| 284 |
+
// enable_if: only enable it if FuncType is actually a function
|
| 285 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && {
|
| 286 |
+
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
|
| 287 |
+
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
|
| 288 |
+
|
| 289 |
+
return std::move(*this).kernel(
|
| 290 |
+
dispatch_key,
|
| 291 |
+
KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
|
| 292 |
+
impl::CppSignature::make<FuncType>(),
|
| 293 |
+
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
|
| 294 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
|
| 295 |
+
);
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
template<class FuncType>
|
| 299 |
+
// enable_if: only enable it if FuncType is actually a function
|
| 300 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value, Options&&> catchAllKernel(FuncType* kernel_func) && {
|
| 301 |
+
static_assert(!std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API.");
|
| 302 |
+
TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr");
|
| 303 |
+
|
| 304 |
+
return std::move(*this).kernel(
|
| 305 |
+
c10::nullopt,
|
| 306 |
+
KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func),
|
| 307 |
+
impl::CppSignature::make<FuncType>(),
|
| 308 |
+
// TODO Do schema inference without relying on WrapFunctionIntoFunctor
|
| 309 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
|
| 310 |
+
);
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
/**
|
| 314 |
+
* Use this to register an operator whose kernel is implemented as a lambda.
|
| 315 |
+
* The kernel is only called for inputs matching the given dispatch key.
|
| 316 |
+
* You can register multiple kernels for different dispatch keys.
|
| 317 |
+
*
|
| 318 |
+
* The lambda must be stateless, i.e. not have a capture. If your kernel
|
| 319 |
+
* needs to store some configuration parameters, write the kernel as a
|
| 320 |
+
* functor instead.
|
| 321 |
+
*
|
| 322 |
+
* Example:
|
| 323 |
+
*
|
| 324 |
+
* > static auto registry = c10::RegisterOperators()
|
| 325 |
+
* > .op(c10::RegisterOperators::options()
|
| 326 |
+
* > .schema("my_op")
|
| 327 |
+
* > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...}));
|
| 328 |
+
*/
|
| 329 |
+
template<class Lambda>
|
| 330 |
+
// enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
|
| 331 |
+
std::enable_if_t<
|
| 332 |
+
guts::is_functor<std::decay_t<Lambda>>::value
|
| 333 |
+
&& !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
|
| 334 |
+
Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && {
|
| 335 |
+
static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
|
| 336 |
+
|
| 337 |
+
// We don't support stateful lambdas (i.e. lambdas with a capture), because their
|
| 338 |
+
// behavior would be nonobvious. A functor kernel with cache gets a new instance of
|
| 339 |
+
// its cache each time the kernel is looked up from the dispatch table.
|
| 340 |
+
// A lambda with a capture would be global and share its capture between all kernel lookups.
|
| 341 |
+
// So, instead of making users having to think about it (including the thread-safety
|
| 342 |
+
// issues this causes), let's just forbid stateful lambdas altogether.
|
| 343 |
+
static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
|
| 344 |
+
|
| 345 |
+
return std::move(*this).kernel(
|
| 346 |
+
dispatch_key,
|
| 347 |
+
KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(functor)),
|
| 348 |
+
impl::CppSignature::make<Lambda>(),
|
| 349 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 350 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
|
| 351 |
+
);
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
/**
|
| 355 |
+
* Use this to register an operator whose kernel is implemented as a lambda.
|
| 356 |
+
* The kernel is a catch-all kernel, meaning it's called independent from
|
| 357 |
+
* the input. Dispatch is disabled for this operator.
|
| 358 |
+
*
|
| 359 |
+
* The lambda must be stateless, i.e. not have a capture. If your kernel
|
| 360 |
+
* needs to store some configuration parameters, write the kernel as a
|
| 361 |
+
* functor instead.
|
| 362 |
+
*
|
| 363 |
+
* Example:
|
| 364 |
+
*
|
| 365 |
+
* > static auto registry = c10::RegisterOperators()
|
| 366 |
+
* > .op(c10::RegisterOperators::options()
|
| 367 |
+
* > .schema("my_op")
|
| 368 |
+
* > .catchAllKernel([] (Tensor a) -> Tensor {...}));
|
| 369 |
+
*/
|
| 370 |
+
template<class Lambda>
|
| 371 |
+
// enable_if: only enable it if Lambda is a functor (note: lambdas are functors)
|
| 372 |
+
std::enable_if_t<
|
| 373 |
+
guts::is_functor<std::decay_t<Lambda>>::value
|
| 374 |
+
&& !std::is_same<typename guts::infer_function_traits_t<std::decay_t<Lambda>>::func_type, KernelFunction::BoxedKernelFunction>::value,
|
| 375 |
+
Options&&> catchAllKernel(Lambda&& lambda) && {
|
| 376 |
+
static_assert(!std::is_base_of<OperatorKernel, std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel<Functor>() API instead.");
|
| 377 |
+
|
| 378 |
+
// We don't support stateful lambdas (i.e. lambdas with a capture), because their
|
| 379 |
+
// behavior would be nonobvious.
|
| 380 |
+
// A lambda with a capture would be global and share its capture between all kernel lookups.
|
| 381 |
+
// This would be a likely source for unexpected race conditions, so we forbid it.
|
| 382 |
+
// If a kernel really needs global state, they can just have regular global state
|
| 383 |
+
// in their .cpp file next to the kernel lambda.
|
| 384 |
+
static_assert(guts::is_stateless_lambda<std::decay_t<Lambda>>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel<Functor>() instead.");
|
| 385 |
+
|
| 386 |
+
return std::move(*this).kernel(
|
| 387 |
+
c10::nullopt,
|
| 388 |
+
KernelFunction::makeFromUnboxedLambda(std::forward<Lambda>(lambda)),
|
| 389 |
+
impl::CppSignature::make<Lambda>(),
|
| 390 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 391 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
|
| 392 |
+
);
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && {
|
| 396 |
+
TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration.");
|
| 397 |
+
aliasAnalysisKind_ = aliasAnalysisKind;
|
| 398 |
+
return std::move(*this);
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
private:
|
| 402 |
+
Options&& kernel(std::optional<DispatchKey> dispatch_key, KernelFunction&& func, std::optional<impl::CppSignature> cpp_signature, std::unique_ptr<FunctionSchema>&& inferred_function_schema) && {
|
| 403 |
+
KernelRegistrationConfig config;
|
| 404 |
+
config.dispatch_key = dispatch_key;
|
| 405 |
+
config.func = std::move(func);
|
| 406 |
+
config.cpp_signature = cpp_signature;
|
| 407 |
+
config.inferred_function_schema = std::move(inferred_function_schema);
|
| 408 |
+
kernels.push_back(std::move(config));
|
| 409 |
+
return std::move(*this);
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
Options()
|
| 413 |
+
: schemaOrName_(c10::nullopt)
|
| 414 |
+
, kernels()
|
| 415 |
+
, aliasAnalysisKind_(c10::nullopt)
|
| 416 |
+
{}
|
| 417 |
+
|
| 418 |
+
// KernelRegistrationConfig accumulates all information from the config
|
| 419 |
+
// parameters passed to a RegisterOperators::op() call into one object.
|
| 420 |
+
struct KernelRegistrationConfig final {
|
| 421 |
+
KernelRegistrationConfig()
|
| 422 |
+
: dispatch_key(c10::nullopt)
|
| 423 |
+
, func()
|
| 424 |
+
, cpp_signature(c10::nullopt)
|
| 425 |
+
, inferred_function_schema(nullptr)
|
| 426 |
+
{}
|
| 427 |
+
|
| 428 |
+
std::optional<DispatchKey> dispatch_key;
|
| 429 |
+
KernelFunction func;
|
| 430 |
+
std::optional<impl::CppSignature> cpp_signature;
|
| 431 |
+
std::unique_ptr<FunctionSchema> inferred_function_schema;
|
| 432 |
+
};
|
| 433 |
+
|
| 434 |
+
std::optional<std::variant<OperatorName, FunctionSchema>> schemaOrName_;
|
| 435 |
+
|
| 436 |
+
std::vector<KernelRegistrationConfig> kernels;
|
| 437 |
+
optional<AliasAnalysisKind> aliasAnalysisKind_;
|
| 438 |
+
friend class RegisterOperators;
|
| 439 |
+
friend class Library;
|
| 440 |
+
};
|
| 441 |
+
|
| 442 |
+
/**
|
| 443 |
+
* Call this to get an instance of registration options, which
|
| 444 |
+
* can be passed to a call to RegisterOperators::op() to specify
|
| 445 |
+
* these options for the operator registration.
|
| 446 |
+
* See class doc comment for examples.
|
| 447 |
+
*/
|
| 448 |
+
static Options options() {
|
| 449 |
+
return {};
|
| 450 |
+
}
|
| 451 |
+
|
| 452 |
+
/**
|
| 453 |
+
* Call this to register an operator. See class doc comment for examples.
|
| 454 |
+
*/
|
| 455 |
+
RegisterOperators&& op(Options&& options) && {
|
| 456 |
+
checkSchemaAndRegisterOp_(std::move(options));
|
| 457 |
+
return std::move(*this);
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
// Regular mutator version of the && version above
|
| 461 |
+
RegisterOperators& op(Options&& options) & {
|
| 462 |
+
checkSchemaAndRegisterOp_(std::move(options));
|
| 463 |
+
return *this;
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
/**
|
| 467 |
+
* This is a shorthand for RegisterOperators::op(Options) where you can
|
| 468 |
+
* specify the operator schema outside of the options parameter.
|
| 469 |
+
* See class doc comment for examples.
|
| 470 |
+
*/
|
| 471 |
+
RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && {
|
| 472 |
+
return std::move(*this).op(std::move(options).schema(schemaOrName));
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
// internal only for registering caffe2 ops
|
| 476 |
+
RegisterOperators&& op(FunctionSchema schema, Options&& options) && {
|
| 477 |
+
return std::move(*this).op(std::move(options).schema(std::move(schema)));
|
| 478 |
+
}
|
| 479 |
+
|
| 480 |
+
template<class FuncType>
|
| 481 |
+
explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options())
|
| 482 |
+
: RegisterOperators() {
|
| 483 |
+
std::move(*this).op(schemaOrName, std::forward<FuncType>(func), std::move(options));
|
| 484 |
+
}
|
| 485 |
+
|
| 486 |
+
/**
|
| 487 |
+
* This API registers an operator based on a kernel function pointer.
|
| 488 |
+
*
|
| 489 |
+
* Given a kernel
|
| 490 |
+
*
|
| 491 |
+
* > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} }
|
| 492 |
+
*
|
| 493 |
+
* This API looks like:
|
| 494 |
+
*
|
| 495 |
+
* > static auto registry = c10::RegisterOperators()
|
| 496 |
+
* > .op("my_op", &my_kernel_cpu);
|
| 497 |
+
*
|
| 498 |
+
* If your kernel is small and the overhead of calling it matters,
|
| 499 |
+
* then this API might be the wrong choice since the following API
|
| 500 |
+
* has a slightly lower overhead for calling into the kernel:
|
| 501 |
+
*
|
| 502 |
+
* > static auto registry = c10::RegisterOperators()
|
| 503 |
+
* > .op("my_op", c10::RegisterOperators::options()
|
| 504 |
+
* > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>());
|
| 505 |
+
*
|
| 506 |
+
* Or, alternatively, write your kernel as a functor:
|
| 507 |
+
*
|
| 508 |
+
* > namespace {
|
| 509 |
+
* > class my_kernel_cpu final : public c10::OperatorKernel {
|
| 510 |
+
* > public:
|
| 511 |
+
* > Tensor operator()(Tensor a, Tensor b) {...}
|
| 512 |
+
* > };
|
| 513 |
+
* > }
|
| 514 |
+
* >
|
| 515 |
+
* > static auto registry = c10::RegisterOperators()
|
| 516 |
+
* > .op("my_op", c10::RegisterOperators::options()
|
| 517 |
+
* > .kernel<my_kernel_cpu>());
|
| 518 |
+
*/
|
| 519 |
+
template<class FuncType>
|
| 520 |
+
// enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction.
|
| 521 |
+
std::enable_if_t<guts::is_function_type<FuncType>::value && !std::is_same<FuncType, KernelFunction::BoxedKernelFunction>::value, RegisterOperators&&>
|
| 522 |
+
op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && {
|
| 523 |
+
constexpr bool AllowLegacyTypes = true;
|
| 524 |
+
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
|
| 525 |
+
c10::nullopt,
|
| 526 |
+
KernelFunction::makeFromUnboxedRuntimeFunction<AllowLegacyTypes>(func),
|
| 527 |
+
impl::CppSignature::make<FuncType>(),
|
| 528 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 529 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<FuncType>>>()
|
| 530 |
+
));
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
/**
|
| 534 |
+
* This API registers an operator based on a kernel lambda.
|
| 535 |
+
*
|
| 536 |
+
* This API looks like:
|
| 537 |
+
*
|
| 538 |
+
* > static auto registry = c10::RegisterOperators()
|
| 539 |
+
* > .op("my_op", [] (Tensor a, Tensor b) {...});
|
| 540 |
+
*
|
| 541 |
+
* This is equivalent to:
|
| 542 |
+
*
|
| 543 |
+
* > static auto registry = c10::RegisterOperators()
|
| 544 |
+
* > .op("my_op", c10::RegisterOperators::options()
|
| 545 |
+
* > .catchAllKernel([] (Tensor a, Tensor b) {...}));
|
| 546 |
+
*
|
| 547 |
+
*/
|
| 548 |
+
template<class Lambda>
|
| 549 |
+
// enable_if: only enable it if Lambda is actually a stateless lambda
|
| 550 |
+
std::enable_if_t<guts::is_functor<Lambda>::value && guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
|
| 551 |
+
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
|
| 552 |
+
static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
|
| 553 |
+
|
| 554 |
+
constexpr bool AllowLegacyTypes = true;
|
| 555 |
+
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
|
| 556 |
+
c10::nullopt,
|
| 557 |
+
KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
|
| 558 |
+
impl::CppSignature::make<Lambda>(),
|
| 559 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 560 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
|
| 561 |
+
));
|
| 562 |
+
}
|
| 563 |
+
|
| 564 |
+
template<class Lambda>
|
| 565 |
+
C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.")
|
| 566 |
+
// enable_if: only enable it if Lambda is actually a functor but not a stateless lambda
|
| 567 |
+
std::enable_if_t<guts::is_functor<Lambda>::value && !guts::is_stateless_lambda<std::decay_t<Lambda>>::value, RegisterOperators&&>
|
| 568 |
+
op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && {
|
| 569 |
+
static_assert(!std::is_base_of<OperatorKernel, Lambda>::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead.");
|
| 570 |
+
|
| 571 |
+
constexpr bool AllowLegacyTypes = true;
|
| 572 |
+
return std::move(*this).op(std::move(options).schema(schemaOrName).kernel(
|
| 573 |
+
c10::nullopt,
|
| 574 |
+
KernelFunction::makeFromUnboxedLambda<AllowLegacyTypes>(std::forward<Lambda>(lambda)),
|
| 575 |
+
impl::CppSignature::make<Lambda>(),
|
| 576 |
+
// TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor
|
| 577 |
+
detail::inferFunctionSchemaFromFunctor<impl::WrapFunctionIntoRuntimeFunctor<std::decay_t<Lambda>>>()
|
| 578 |
+
));
|
| 579 |
+
}
|
| 580 |
+
|
| 581 |
+
private:
|
| 582 |
+
void checkSchemaAndRegisterOp_(Options&& config);
|
| 583 |
+
|
| 584 |
+
static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options);
|
| 585 |
+
void checkNoDuplicateKernels_(const Options& options);
|
| 586 |
+
void registerOp_(Options&& options);
|
| 587 |
+
|
| 588 |
+
std::vector<RegistrationHandleRAII> registrars_;
|
| 589 |
+
};
|
| 590 |
+
|
| 591 |
+
} // namespace c10
|
| 592 |
+
|
| 593 |
+
namespace torch {
|
| 594 |
+
// Old-style API
|
| 595 |
+
using RegisterOperators = c10::RegisterOperators;
|
| 596 |
+
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/// Flush-To-Zero and Denormals-Are-Zero mode
|
| 2 |
+
///
|
| 3 |
+
/// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass
|
| 4 |
+
/// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64
|
| 5 |
+
/// and some x86 CPUs. They result in reduced precision for values near zero,
|
| 6 |
+
/// but increased performance.
|
| 7 |
+
///
|
| 8 |
+
/// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz
|
| 9 |
+
|
| 10 |
+
namespace at::cpu {
|
| 11 |
+
|
| 12 |
+
bool set_flush_denormal(bool on);
|
| 13 |
+
|
| 14 |
+
} // namespace at::cpu
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Export.h>
|
| 4 |
+
|
| 5 |
+
namespace at::cpu {
|
| 6 |
+
|
| 7 |
+
TORCH_API bool is_cpu_support_avx2();
|
| 8 |
+
TORCH_API bool is_cpu_support_avx512();
|
| 9 |
+
|
| 10 |
+
// Detect if CPU support Vector Neural Network Instruction.
|
| 11 |
+
TORCH_API bool is_cpu_support_vnni();
|
| 12 |
+
|
| 13 |
+
} // namespace at::cpu
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/functional_base.h>
|
| 4 |
+
#include <ATen/cpu/vec/functional_bfloat16.h>
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/vec.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
|
| 9 |
+
namespace at::vec {
|
| 10 |
+
|
| 11 |
+
// slow path
|
| 12 |
+
template <typename scalar_t, typename Op>
|
| 13 |
+
inline scalar_t vec_reduce_all(
|
| 14 |
+
const Op& vec_fun,
|
| 15 |
+
vec::Vectorized<scalar_t> acc_vec,
|
| 16 |
+
int64_t size) {
|
| 17 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 18 |
+
scalar_t acc_arr[Vec::size()];
|
| 19 |
+
acc_vec.store(acc_arr);
|
| 20 |
+
for (const auto i : c10::irange(1, size)) {
|
| 21 |
+
std::array<scalar_t, Vec::size()> acc_arr_next = {0};
|
| 22 |
+
acc_arr_next[0] = acc_arr[i];
|
| 23 |
+
Vec acc_vec_next = Vec::loadu(acc_arr_next.data());
|
| 24 |
+
acc_vec = vec_fun(acc_vec, acc_vec_next);
|
| 25 |
+
}
|
| 26 |
+
acc_vec.store(acc_arr);
|
| 27 |
+
return acc_arr[0];
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
template <typename scalar_t, typename Op>
|
| 31 |
+
struct VecReduceAllSIMD {
|
| 32 |
+
static inline scalar_t apply(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
|
| 33 |
+
return vec_reduce_all(vec_fun, acc_vec, Vectorized<scalar_t>::size());
|
| 34 |
+
}
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
#if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
|
| 38 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 39 |
+
template <typename Op>
|
| 40 |
+
struct VecReduceAllSIMD<float, Op> {
|
| 41 |
+
static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
|
| 42 |
+
using Vec = Vectorized<float>;
|
| 43 |
+
Vec v = acc_vec;
|
| 44 |
+
// 128-bit shuffle
|
| 45 |
+
Vec v1 = _mm256_permute2f128_ps(v, v, 0x1);
|
| 46 |
+
v = vec_fun(v, v1);
|
| 47 |
+
// 64-bit shuffle
|
| 48 |
+
v1 = _mm256_shuffle_ps(v, v, 0x4E);
|
| 49 |
+
v = vec_fun(v, v1);
|
| 50 |
+
// 32-bit shuffle
|
| 51 |
+
v1 = _mm256_shuffle_ps(v, v, 0xB1);
|
| 52 |
+
v = vec_fun(v, v1);
|
| 53 |
+
return _mm256_cvtss_f32(v);
|
| 54 |
+
}
|
| 55 |
+
};
|
| 56 |
+
#endif // defined(CPU_CAPABILITY_AVX2)
|
| 57 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 58 |
+
template <typename Op>
|
| 59 |
+
struct VecReduceAllSIMD<float, Op> {
|
| 60 |
+
static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
|
| 61 |
+
using Vec = Vectorized<float>;
|
| 62 |
+
Vec v = acc_vec;
|
| 63 |
+
// 256-bit shuffle
|
| 64 |
+
Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E);
|
| 65 |
+
v = vec_fun(v, v1);
|
| 66 |
+
// 128-bit shuffle
|
| 67 |
+
v1 = _mm512_shuffle_f32x4(v, v, 0xB1);
|
| 68 |
+
v = vec_fun(v, v1);
|
| 69 |
+
// 64-bit shuffle
|
| 70 |
+
v1 = _mm512_shuffle_ps(v, v, 0x4E);
|
| 71 |
+
v = vec_fun(v, v1);
|
| 72 |
+
// 32-bit shuffle
|
| 73 |
+
v1 = _mm512_shuffle_ps(v, v, 0xB1);
|
| 74 |
+
v = vec_fun(v, v1);
|
| 75 |
+
return _mm512_cvtss_f32(v);
|
| 76 |
+
}
|
| 77 |
+
};
|
| 78 |
+
#endif // defined(CPU_CAPABILITY_AVX512)
|
| 79 |
+
#endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
|
| 80 |
+
|
| 81 |
+
#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
|
| 82 |
+
template <typename Op>
|
| 83 |
+
struct VecReduceAllSIMD<float, Op> {
|
| 84 |
+
static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
|
| 85 |
+
using Vec = Vectorized<float>;
|
| 86 |
+
Vec v = acc_vec;
|
| 87 |
+
|
| 88 |
+
// 128-bit shuffle: [a1, a2, a3, a4, a5, a6, a7, a8] -> [a5, a6, a7, a8, a1, a2, a3, a4]
|
| 89 |
+
Vec v1 = {v.get_high(), v.get_low()};
|
| 90 |
+
// [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] ('+' stands for the reduction function. Note that the last 4 elements are not required)
|
| 91 |
+
v = vec_fun(v, v1);
|
| 92 |
+
|
| 93 |
+
// 64-bit shuffle: [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] -> [a3+a7, a4+a8, a1+a5, a2+a6, -, -, -, -]
|
| 94 |
+
float32x4_t v1_1 = vextq_f32(v.get_low(), v.get_low(), 2);
|
| 95 |
+
v1 = {v1_1, v1_1};
|
| 96 |
+
// [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -]
|
| 97 |
+
v = vec_fun(v, v1);
|
| 98 |
+
|
| 99 |
+
// 32-bit shuffle: [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -] -> [a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, -, -, -, -]
|
| 100 |
+
v1_1 = vrev64q_f32(v.get_low());
|
| 101 |
+
v1 = {v1_1, v1_1};
|
| 102 |
+
// [a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, -, -, -, -]
|
| 103 |
+
v = vec_fun(v, v1);
|
| 104 |
+
|
| 105 |
+
return v.get_low()[0];
|
| 106 |
+
}
|
| 107 |
+
};
|
| 108 |
+
#endif // defined(__aarch64__)
|
| 109 |
+
|
| 110 |
+
template <typename scalar_t, typename Op>
|
| 111 |
+
inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
|
| 112 |
+
return VecReduceAllSIMD<scalar_t, Op>::apply(vec_fun, acc_vec);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
template <typename scalar_t, typename Op,
|
| 116 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 117 |
+
inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
|
| 118 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 119 |
+
if (size < Vec::size())
|
| 120 |
+
return vec_reduce_all(vec_fun, Vec::loadu(data, size), size);
|
| 121 |
+
int64_t d = Vec::size();
|
| 122 |
+
Vec acc_vec = Vec::loadu(data);
|
| 123 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 124 |
+
Vec data_vec = Vec::loadu(data + d);
|
| 125 |
+
acc_vec = vec_fun(acc_vec, data_vec);
|
| 126 |
+
}
|
| 127 |
+
if (size - d > 0) {
|
| 128 |
+
Vec data_vec = Vec::loadu(data + d, size - d);
|
| 129 |
+
acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d);
|
| 130 |
+
}
|
| 131 |
+
return vec_reduce_all(vec_fun, acc_vec);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
// similar to reduce_all, but reduces into two outputs
|
| 135 |
+
template <typename scalar_t, typename Op1, typename Op2,
|
| 136 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 137 |
+
inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
|
| 138 |
+
const scalar_t* data, int64_t size) {
|
| 139 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 140 |
+
if (size < Vec::size()) {
|
| 141 |
+
auto loaded_data = Vec::loadu(data, size);
|
| 142 |
+
return std::pair<scalar_t, scalar_t>(
|
| 143 |
+
vec_reduce_all(vec_fun1, loaded_data, size),
|
| 144 |
+
vec_reduce_all(vec_fun2, loaded_data, size));
|
| 145 |
+
}
|
| 146 |
+
int64_t d = Vec::size();
|
| 147 |
+
Vec acc_vec1 = Vec::loadu(data);
|
| 148 |
+
Vec acc_vec2 = Vec::loadu(data);
|
| 149 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 150 |
+
Vec data_vec = Vec::loadu(data + d);
|
| 151 |
+
acc_vec1 = vec_fun1(acc_vec1, data_vec);
|
| 152 |
+
acc_vec2 = vec_fun2(acc_vec2, data_vec);
|
| 153 |
+
}
|
| 154 |
+
if (size - d > 0) {
|
| 155 |
+
Vec data_vec = Vec::loadu(data + d, size - d);
|
| 156 |
+
acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d);
|
| 157 |
+
acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d);
|
| 158 |
+
}
|
| 159 |
+
return std::pair<scalar_t, scalar_t>(
|
| 160 |
+
vec_reduce_all(vec_fun1, acc_vec1),
|
| 161 |
+
vec_reduce_all(vec_fun2, acc_vec2));
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
template <typename scalar_t, typename MapOp, typename ReduceOp,
|
| 165 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 166 |
+
inline scalar_t map_reduce_all(
|
| 167 |
+
const MapOp& map_fun,
|
| 168 |
+
const ReduceOp& red_fun,
|
| 169 |
+
const scalar_t* data,
|
| 170 |
+
int64_t size) {
|
| 171 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 172 |
+
if (size < Vec::size())
|
| 173 |
+
return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size);
|
| 174 |
+
int64_t d = Vec::size();
|
| 175 |
+
Vec acc_vec = map_fun(Vec::loadu(data));
|
| 176 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 177 |
+
Vec data_vec = Vec::loadu(data + d);
|
| 178 |
+
data_vec = map_fun(data_vec);
|
| 179 |
+
acc_vec = red_fun(acc_vec, data_vec);
|
| 180 |
+
}
|
| 181 |
+
if (size - d > 0) {
|
| 182 |
+
Vec data_vec = Vec::loadu(data + d, size - d);
|
| 183 |
+
data_vec = map_fun(data_vec);
|
| 184 |
+
acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
|
| 185 |
+
}
|
| 186 |
+
return vec_reduce_all(red_fun, acc_vec);
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
template <typename scalar_t, typename MapOp, typename ReduceOp,
|
| 190 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 191 |
+
inline scalar_t map2_reduce_all(
|
| 192 |
+
const MapOp& map_fun,
|
| 193 |
+
const ReduceOp& red_fun,
|
| 194 |
+
const scalar_t* data,
|
| 195 |
+
const scalar_t* data2,
|
| 196 |
+
int64_t size) {
|
| 197 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 198 |
+
if (size < Vec::size()) {
|
| 199 |
+
Vec data_vec = Vec::loadu(data, size);
|
| 200 |
+
Vec data2_vec = Vec::loadu(data2, size);
|
| 201 |
+
data_vec = map_fun(data_vec, data2_vec);
|
| 202 |
+
return vec_reduce_all(red_fun, data_vec, size);
|
| 203 |
+
}
|
| 204 |
+
int64_t d = Vec::size();
|
| 205 |
+
Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2));
|
| 206 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 207 |
+
Vec data_vec = Vec::loadu(data + d);
|
| 208 |
+
Vec data2_vec = Vec::loadu(data2 + d);
|
| 209 |
+
data_vec = map_fun(data_vec, data2_vec);
|
| 210 |
+
acc_vec = red_fun(acc_vec, data_vec);
|
| 211 |
+
}
|
| 212 |
+
if (size - d > 0) {
|
| 213 |
+
Vec data_vec = Vec::loadu(data + d, size - d);
|
| 214 |
+
Vec data2_vec = Vec::loadu(data2 + d, size - d);
|
| 215 |
+
data_vec = map_fun(data_vec, data2_vec);
|
| 216 |
+
acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
|
| 217 |
+
}
|
| 218 |
+
return vec_reduce_all(red_fun, acc_vec);
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
template <typename scalar_t, typename MapOp, typename ReduceOp,
|
| 222 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 223 |
+
inline scalar_t map3_reduce_all(
|
| 224 |
+
const MapOp& map_fun,
|
| 225 |
+
const ReduceOp& red_fun,
|
| 226 |
+
const scalar_t* data,
|
| 227 |
+
const scalar_t* data2,
|
| 228 |
+
const scalar_t* data3,
|
| 229 |
+
int64_t size) {
|
| 230 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 231 |
+
if (size < Vec::size()) {
|
| 232 |
+
Vec data_vec = Vec::loadu(data, size);
|
| 233 |
+
Vec data2_vec = Vec::loadu(data2, size);
|
| 234 |
+
Vec data3_vec = Vec::loadu(data3, size);
|
| 235 |
+
data_vec = map_fun(data_vec, data2_vec, data3_vec);
|
| 236 |
+
return vec_reduce_all(red_fun, data_vec, size);
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
int64_t d = Vec::size();
|
| 240 |
+
Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3));
|
| 241 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 242 |
+
Vec data_vec = Vec::loadu(data + d);
|
| 243 |
+
Vec data2_vec = Vec::loadu(data2 + d);
|
| 244 |
+
Vec data3_vec = Vec::loadu(data3 + d);
|
| 245 |
+
data_vec = map_fun(data_vec, data2_vec, data3_vec);
|
| 246 |
+
acc_vec = red_fun(acc_vec, data_vec);
|
| 247 |
+
}
|
| 248 |
+
if (size - d > 0) {
|
| 249 |
+
Vec data_vec = Vec::loadu(data + d, size - d);
|
| 250 |
+
Vec data2_vec = Vec::loadu(data2 + d, size - d);
|
| 251 |
+
Vec data3_vec = Vec::loadu(data3 + d, size - d);
|
| 252 |
+
data_vec = map_fun(data_vec, data2_vec, data3_vec);
|
| 253 |
+
acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
|
| 254 |
+
}
|
| 255 |
+
return vec_reduce_all(red_fun, acc_vec);
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
template <typename scalar_t, typename Op,
|
| 259 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 260 |
+
inline void map(
|
| 261 |
+
const Op& vec_fun,
|
| 262 |
+
scalar_t* output_data,
|
| 263 |
+
const scalar_t* input_data,
|
| 264 |
+
int64_t size) {
|
| 265 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 266 |
+
int64_t d = 0;
|
| 267 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 268 |
+
Vec output_vec = vec_fun(Vec::loadu(input_data + d));
|
| 269 |
+
output_vec.store(output_data + d);
|
| 270 |
+
}
|
| 271 |
+
if (size - d > 0) {
|
| 272 |
+
Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d));
|
| 273 |
+
output_vec.store(output_data + d, size - d);
|
| 274 |
+
}
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
template <typename scalar_t, typename Op,
|
| 278 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 279 |
+
inline void map2(
|
| 280 |
+
const Op& vec_fun,
|
| 281 |
+
scalar_t* output_data,
|
| 282 |
+
const scalar_t* input_data,
|
| 283 |
+
const scalar_t* input_data2,
|
| 284 |
+
int64_t size) {
|
| 285 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 286 |
+
int64_t d = 0;
|
| 287 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 288 |
+
Vec data_vec = Vec::loadu(input_data + d);
|
| 289 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d);
|
| 290 |
+
Vec output_vec = vec_fun(data_vec, data_vec2);
|
| 291 |
+
output_vec.store(output_data + d);
|
| 292 |
+
}
|
| 293 |
+
if (size - d > 0) {
|
| 294 |
+
Vec data_vec = Vec::loadu(input_data + d, size - d);
|
| 295 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
|
| 296 |
+
Vec output_vec = vec_fun(data_vec, data_vec2);
|
| 297 |
+
output_vec.store(output_data + d, size - d);
|
| 298 |
+
}
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
template <typename scalar_t, typename Op,
|
| 302 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 303 |
+
inline void map3(
|
| 304 |
+
const Op& vec_fun,
|
| 305 |
+
scalar_t* output_data,
|
| 306 |
+
const scalar_t* input_data1,
|
| 307 |
+
const scalar_t* input_data2,
|
| 308 |
+
const scalar_t* input_data3,
|
| 309 |
+
int64_t size) {
|
| 310 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 311 |
+
int64_t d = 0;
|
| 312 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 313 |
+
Vec data_vec1 = Vec::loadu(input_data1 + d);
|
| 314 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d);
|
| 315 |
+
Vec data_vec3 = Vec::loadu(input_data3 + d);
|
| 316 |
+
Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
|
| 317 |
+
output_vec.store(output_data + d);
|
| 318 |
+
}
|
| 319 |
+
if (size - d > 0) {
|
| 320 |
+
Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
|
| 321 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
|
| 322 |
+
Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
|
| 323 |
+
Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
|
| 324 |
+
output_vec.store(output_data + d, size - d);
|
| 325 |
+
}
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
template <typename scalar_t, typename Op,
|
| 329 |
+
typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 330 |
+
inline void map4(
|
| 331 |
+
const Op& vec_fun,
|
| 332 |
+
scalar_t* output_data,
|
| 333 |
+
const scalar_t* input_data1,
|
| 334 |
+
const scalar_t* input_data2,
|
| 335 |
+
const scalar_t* input_data3,
|
| 336 |
+
const scalar_t* input_data4,
|
| 337 |
+
int64_t size) {
|
| 338 |
+
using Vec = vec::Vectorized<scalar_t>;
|
| 339 |
+
int64_t d = 0;
|
| 340 |
+
for (; d < size - (size % Vec::size()); d += Vec::size()) {
|
| 341 |
+
Vec data_vec1 = Vec::loadu(input_data1 + d);
|
| 342 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d);
|
| 343 |
+
Vec data_vec3 = Vec::loadu(input_data3 + d);
|
| 344 |
+
Vec data_vec4 = Vec::loadu(input_data4 + d);
|
| 345 |
+
Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
|
| 346 |
+
output_vec.store(output_data + d);
|
| 347 |
+
}
|
| 348 |
+
if (size - d > 0) {
|
| 349 |
+
Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
|
| 350 |
+
Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
|
| 351 |
+
Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
|
| 352 |
+
Vec data_vec4 = Vec::loadu(input_data4 + d, size - d);
|
| 353 |
+
Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
|
| 354 |
+
output_vec.store(output_data + d, size - d);
|
| 355 |
+
}
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
} // namespace at::vec
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h
ADDED
|
@@ -0,0 +1,549 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/vec.h>
|
| 7 |
+
|
| 8 |
+
namespace at::vec {
|
| 9 |
+
|
| 10 |
+
// BFloat16 specification
|
| 11 |
+
template <typename scalar_t> struct VecScalarType { using type = scalar_t; };
|
| 12 |
+
template <> struct VecScalarType<BFloat16> { using type = float; };
|
| 13 |
+
template <> struct VecScalarType<Half> { using type = float; };
|
| 14 |
+
|
| 15 |
+
// This is different from at::acc_type since we only need to specialize BFloat16
|
| 16 |
+
template <typename scalar_t>
|
| 17 |
+
using vec_scalar_t = typename VecScalarType<scalar_t>::type;
|
| 18 |
+
|
| 19 |
+
// Vector conversion between float and bfloat16/half
|
| 20 |
+
template <typename scalar_t,
|
| 21 |
+
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 22 |
+
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float(const Vectorized<scalar_t>&);
|
| 23 |
+
|
| 24 |
+
template <>
|
| 25 |
+
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<BFloat16> (const Vectorized<BFloat16>& a) {
|
| 26 |
+
return convert_bfloat16_float(a);
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
template <>
|
| 30 |
+
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<Half> (const Vectorized<Half>& a) {
|
| 31 |
+
return convert_half_float(a);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// Narrow a pair of float vectors back into one reduced-precision vector
// (the inverse of convert_to_float). Specialized per reduced type.
template <typename scalar_t,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline Vectorized<scalar_t> convert_from_float(const Vectorized<float>&, const Vectorized<float>&);

template <>
inline Vectorized<BFloat16> convert_from_float<BFloat16>(const Vectorized<float>& a, const Vectorized<float>& b) {
  return convert_float_bfloat16(a, b);
}

template <>
inline Vectorized<Half> convert_from_float<Half>(const Vectorized<float>& a, const Vectorized<float>& b) {
  return convert_float_half(a, b);
}
|
| 47 |
+
|
| 48 |
+
// Load reduced-precision data from memory directly into float vectors.
// Two-output overload: fills a pair of float vectors (one full bVec worth).
template <typename scalar_t,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void load_to_float(const scalar_t *data, Vectorized<float> &out1, Vectorized<float> &out2);

template <>
inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out1, Vectorized<float> &out2) {
  load_fp32_from_bf16(data, out1, out2);
}

template <>
inline void load_to_float<Half> (const Half *data, Vectorized<float> &out1, Vectorized<float> &out2) {
  load_fp32_from_fp16(data, out1, out2);
}

// Single-output overload: loads enough elements to fill one float vector.
template <typename scalar_t,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void load_to_float(const scalar_t *data, Vectorized<float> &out);

template <>
inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out) {
  load_fp32_from_bf16(data, out);
}

template <>
inline void load_to_float<Half> (const Half *data, Vectorized<float> &out) {
  load_fp32_from_fp16(data, out);
}
|
| 75 |
+
|
| 76 |
+
// Note that we already have specialized member of Vectorized<scalar_t> for BFloat16
|
| 77 |
+
// so the following functions would run smoothly:
|
| 78 |
+
// using Vec = Vectorized<BFloat16>;
|
| 79 |
+
// Vec one = Vec(BFloat16(1));
|
| 80 |
+
// vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N);
|
| 81 |
+
//
|
| 82 |
+
// Then why we still need to specialize "functional"?
|
| 83 |
+
// If we do specialization at Vectorized<> level, the above example would need 3 pairs of
|
| 84 |
+
// conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/".
|
| 85 |
+
// If we do specialization at vec::map<>() level, we have only 1 pair of conversion
|
| 86 |
+
// of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only.
|
| 87 |
+
//
|
| 88 |
+
// The following BFloat16 functionality will only do data type conversion for input
|
| 89 |
+
// and output vector (reduce functionality will only convert the final scalar back to bf16).
|
| 90 |
+
// Compared to Vectorized<> specialization,
|
| 91 |
+
// 1. better performance since we have less data type conversion;
|
| 92 |
+
// 2. less rounding error since immediate results are kept in fp32;
|
| 93 |
+
// 3. accumulation done on data type of fp32.
|
| 94 |
+
//
|
| 95 |
+
// If you plan to extend this file, please ensure adding unit tests at
|
| 96 |
+
// aten/src/ATen/test/vec_test_all_types.cpp
|
| 97 |
+
//
|
| 98 |
+
// Reduce `size` reduced-precision elements with `vec_fun`, accumulating in
// fp32, and return the fp32 result. One bVec load widens into two fVec halves
// that are reduced in parallel and merged at the end.
template <typename scalar_t, typename Op,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Short input: a single masked load covers everything.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size > fVec::size()) {
      // Fold the partially-valid upper half into the lower one; fVec::set
      // keeps only the first (size - fVec::size()) combined lanes.
      data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(vec_fun, data_fvec0, fVec::size());
    } else {
      return vec_reduce_all<float>(vec_fun, data_fvec0, size);
    }
  }
  // Seed the accumulators with the first full chunk, then loop over the rest.
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = vec_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Masked tail: only merge the lanes that actually hold data.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size - d > fVec::size()) {
      acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  // Merge the two half-accumulators, then reduce across lanes.
  acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(vec_fun, acc_fvec0);
}
|
| 135 |
+
|
| 136 |
+
template <typename scalar_t, typename Op1, typename Op2,
|
| 137 |
+
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 138 |
+
inline std::pair<float, float> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
|
| 139 |
+
const scalar_t* data, int64_t size) {
|
| 140 |
+
using bVec = vec::Vectorized<scalar_t>;
|
| 141 |
+
using fVec = vec::Vectorized<float>;
|
| 142 |
+
if (size < bVec::size()) {
|
| 143 |
+
bVec data_bvec = bVec::loadu(data, size);
|
| 144 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 145 |
+
if (size > fVec::size()) {
|
| 146 |
+
fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size());
|
| 147 |
+
fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size());
|
| 148 |
+
return std::pair<scalar_t, scalar_t>(
|
| 149 |
+
vec_reduce_all<float>(vec_fun1, acc1_fvec, fVec::size()),
|
| 150 |
+
vec_reduce_all<float>(vec_fun2, acc2_fvec, fVec::size()));
|
| 151 |
+
} else {
|
| 152 |
+
return std::pair<scalar_t, scalar_t>(
|
| 153 |
+
vec_reduce_all<float>(vec_fun1, data_fvec0, size),
|
| 154 |
+
vec_reduce_all<float>(vec_fun2, data_fvec0, size));
|
| 155 |
+
}
|
| 156 |
+
}
|
| 157 |
+
int64_t d = bVec::size();
|
| 158 |
+
bVec acc_bvec = bVec::loadu(data);
|
| 159 |
+
auto [acc1_fvec0, acc1_fvec1] = convert_to_float<scalar_t>(acc_bvec);
|
| 160 |
+
auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc_bvec);
|
| 161 |
+
for (; d < size - (size % bVec::size()); d += bVec::size()) {
|
| 162 |
+
bVec data_bvec = bVec::loadu(data + d);
|
| 163 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 164 |
+
acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
|
| 165 |
+
acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1);
|
| 166 |
+
acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
|
| 167 |
+
acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1);
|
| 168 |
+
}
|
| 169 |
+
if (size - d > 0) {
|
| 170 |
+
bVec data_bvec = bVec::loadu(data + d, size - d);
|
| 171 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 172 |
+
if (size - d > fVec::size()) {
|
| 173 |
+
acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
|
| 174 |
+
acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size());
|
| 175 |
+
acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
|
| 176 |
+
acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size());
|
| 177 |
+
} else {
|
| 178 |
+
acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d);
|
| 179 |
+
acc2_fvec0 = fVec::set(acc2_fvec0, vec_fun2(acc2_fvec0, data_fvec0), size - d);
|
| 180 |
+
}
|
| 181 |
+
}
|
| 182 |
+
acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1);
|
| 183 |
+
acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1);
|
| 184 |
+
return std::pair<scalar_t, scalar_t>(
|
| 185 |
+
vec_reduce_all<float>(vec_fun1, acc1_fvec0),
|
| 186 |
+
vec_reduce_all<float>(vec_fun2, acc2_fvec0));
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
// Apply the unary `map_fun` to every element, then reduce with `red_fun`,
// all in fp32; only the input load converts from the reduced type.
template <typename scalar_t, typename MapOp, typename ReduceOp,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Short input: one masked load, map, then lane-wise reduce.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0);
      data_fvec1 = map_fun(data_fvec1);
      // Fold the partially-valid upper half into the lower half's lanes.
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  // Seed the accumulators with the mapped first full chunk.
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  acc_fvec0 = map_fun(acc_fvec0);
  acc_fvec1 = map_fun(acc_fvec1);
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    data_fvec0 = map_fun(data_fvec0);
    data_fvec1 = map_fun(data_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Masked tail: only merge lanes that actually hold data.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0);
      data_fvec1 = map_fun(data_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
|
| 240 |
+
|
| 241 |
+
// Binary variant of map_reduce_all: map_fun(data[i], data2[i]) followed by a
// red_fun reduction, with all arithmetic in fp32.
template <typename scalar_t, typename MapOp, typename ReduceOp,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map2_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Short input: single masked loads of both operands.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2, size);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    if (size > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1);
      // Fold the partially-valid upper half into the lower half's lanes.
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  // Seed the accumulators with the mapped first full chunk.
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  bVec acc2_bvec = bVec::loadu(data2);
  auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
  acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0);
  acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1);
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    data_fvec0 = map_fun(data_fvec0, data2_fvec0);
    data_fvec1 = map_fun(data_fvec1, data2_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Masked tail: only merge lanes that actually hold data.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
|
| 301 |
+
|
| 302 |
+
// Ternary variant of map_reduce_all: map_fun(data[i], data2[i], data3[i])
// followed by a red_fun reduction, with all arithmetic in fp32.
template <typename scalar_t, typename MapOp, typename ReduceOp,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline float map3_reduce_all(
    const MapOp& map_fun,
    const ReduceOp& red_fun,
    const scalar_t* data,
    const scalar_t* data2,
    const scalar_t* data3,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  if (size < bVec::size()) {
    // Short input: single masked loads of all three operands.
    bVec data_bvec = bVec::loadu(data, size);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2, size);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3, size);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    if (size > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
      // Fold the partially-valid upper half into the lower half's lanes.
      data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
      return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      return vec_reduce_all<float>(red_fun, data_fvec0, size);
    }
  }
  // Seed the accumulators with the mapped first full chunk.
  int64_t d = bVec::size();
  bVec acc_bvec = bVec::loadu(data);
  auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
  bVec acc2_bvec = bVec::loadu(data2);
  auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
  bVec acc3_bvec = bVec::loadu(data3);
  auto [acc3_fvec0, acc3_fvec1] = convert_to_float<scalar_t>(acc3_bvec);
  acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0);
  acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1);
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data_bvec = bVec::loadu(data + d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3 + d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
    data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
    acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
    acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
  }
  if (size - d > 0) {
    // Masked tail: only merge lanes that actually hold data.
    bVec data_bvec = bVec::loadu(data + d, size - d);
    auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
    bVec data2_bvec = bVec::loadu(data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(data3 + d, size - d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    if (size - d > fVec::size()) {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
      acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
      acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
    } else {
      data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
      acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
    }
  }
  acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
  return vec_reduce_all<float>(red_fun, acc_fvec0);
}
|
| 371 |
+
|
| 372 |
+
template <typename scalar_t, typename Op,
|
| 373 |
+
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 374 |
+
inline void map(
|
| 375 |
+
const Op& vec_fun,
|
| 376 |
+
scalar_t* output_data,
|
| 377 |
+
const scalar_t* input_data,
|
| 378 |
+
int64_t size) {
|
| 379 |
+
using bVec = vec::Vectorized<scalar_t>;
|
| 380 |
+
using fVec = vec::Vectorized<float>;
|
| 381 |
+
int64_t d = 0;
|
| 382 |
+
for (; d < size - (size % bVec::size()); d += bVec::size()) {
|
| 383 |
+
bVec data_bvec = bVec::loadu(input_data + d);
|
| 384 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 385 |
+
fVec output_fvec0 = vec_fun(data_fvec0);
|
| 386 |
+
fVec output_fvec1 = vec_fun(data_fvec1);
|
| 387 |
+
bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
|
| 388 |
+
output_bvec.store(output_data + d);
|
| 389 |
+
}
|
| 390 |
+
if (size - d > 0) {
|
| 391 |
+
bVec data_bvec = bVec::loadu(input_data + d, size - d);
|
| 392 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 393 |
+
fVec output_fvec0 = vec_fun(data_fvec0);
|
| 394 |
+
fVec output_fvec1 = vec_fun(data_fvec1);
|
| 395 |
+
bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
|
| 396 |
+
output_bvec.store(output_data + d, size - d);
|
| 397 |
+
}
|
| 398 |
+
}
|
| 399 |
+
|
| 400 |
+
// Elementwise map from an fp32 input buffer to a reduced-precision output:
// applies vec_fun in fp32 and only narrows on the store.
template <typename scalar_t, typename Op,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map(
    const Op& vec_fun,
    scalar_t* output_data,
    const float* input_data,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  int64_t d = 0;
  // Main loop: two fVec loads per bVec-sized output chunk.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    fVec data_fvec0 = fVec::loadu(input_data + d);
    fVec data_fvec1 = fVec::loadu(input_data + d + fVec::size());
    fVec output_fvec0 = vec_fun(data_fvec0);
    fVec output_fvec1 = vec_fun(data_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d);
  }
  if (size - d > 0) {
    // Tail: at most one full fVec plus a masked remainder.
    fVec data_fvec0, data_fvec1;
    if (size - d > fVec::size()) {
      data_fvec0 = fVec::loadu(input_data + d);
      data_fvec1 = fVec::loadu(input_data + d + fVec::size(), size - d - fVec::size());
    } else {
      // choose to align with behaviour of bVec::loadu(ptr, size),
      // which leaves data_fvec1 uninitialized
      // (vec_fun's result on those lanes is never stored: the masked
      // store below writes only size - d elements)
      data_fvec0 = fVec::loadu(input_data + d, size - d);
    }
    fVec output_fvec0 = vec_fun(data_fvec0);
    fVec output_fvec1 = vec_fun(data_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d, size - d);
  }
}
|
| 434 |
+
|
| 435 |
+
template <typename scalar_t, typename Op,
|
| 436 |
+
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
|
| 437 |
+
inline void map2(
|
| 438 |
+
const Op& vec_fun,
|
| 439 |
+
scalar_t* output_data,
|
| 440 |
+
const scalar_t* input_data,
|
| 441 |
+
const scalar_t* input_data2,
|
| 442 |
+
int64_t size) {
|
| 443 |
+
using bVec = vec::Vectorized<scalar_t>;
|
| 444 |
+
using fVec = vec::Vectorized<float>;
|
| 445 |
+
int64_t d = 0;
|
| 446 |
+
for (; d < size - (size % bVec::size()); d += bVec::size()) {
|
| 447 |
+
bVec data_bvec = bVec::loadu(input_data + d);
|
| 448 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 449 |
+
bVec data2_bvec = bVec::loadu(input_data2 + d);
|
| 450 |
+
auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
|
| 451 |
+
fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
|
| 452 |
+
fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
|
| 453 |
+
bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
|
| 454 |
+
output_bvec.store(output_data + d);
|
| 455 |
+
}
|
| 456 |
+
if (size - d > 0) {
|
| 457 |
+
bVec data_bvec = bVec::loadu(input_data + d, size - d);
|
| 458 |
+
auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
|
| 459 |
+
bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
|
| 460 |
+
auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
|
| 461 |
+
fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
|
| 462 |
+
fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
|
| 463 |
+
bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
|
| 464 |
+
output_bvec.store(output_data + d, size - d);
|
| 465 |
+
}
|
| 466 |
+
}
|
| 467 |
+
|
| 468 |
+
// Elementwise ternary map: output[i] = vec_fun(in1[i], in2[i], in3[i]),
// computed in fp32 and narrowed only on the store.
template <typename scalar_t, typename Op,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map3(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data1,
    const scalar_t* input_data2,
    const scalar_t* input_data3,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  int64_t d = 0;
  // Main loop over whole bVec-sized chunks.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data1_bvec = bVec::loadu(input_data1 + d);
    auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
    bVec data2_bvec = bVec::loadu(input_data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(input_data3 + d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d);
  }
  if (size - d > 0) {
    // Masked tail: load/store only the remaining size - d elements.
    bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
    auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
    bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d, size - d);
  }
}
|
| 505 |
+
|
| 506 |
+
// Elementwise quaternary map: output[i] = vec_fun(in1[i], in2[i], in3[i],
// in4[i]), computed in fp32 and narrowed only on the store.
template <typename scalar_t, typename Op,
          typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map4(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data1,
    const scalar_t* input_data2,
    const scalar_t* input_data3,
    const scalar_t* input_data4,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  int64_t d = 0;
  // Main loop over whole bVec-sized chunks.
  for (; d < size - (size % bVec::size()); d += bVec::size()) {
    bVec data1_bvec = bVec::loadu(input_data1 + d);
    auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
    bVec data2_bvec = bVec::loadu(input_data2 + d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(input_data3 + d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    bVec data4_bvec = bVec::loadu(input_data4 + d);
    auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d);
  }
  if (size - d > 0) {
    // Masked tail: load/store only the remaining size - d elements.
    bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
    auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
    bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
    auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
    bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
    auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
    bVec data4_bvec = bVec::loadu(input_data4 + d, size - d);
    auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
    output_bvec.store(output_data + d, size - d);
  }
}
|
| 548 |
+
|
| 549 |
+
} // namespace at::vec
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
// Selects and includes the platform-appropriate SIMD intrinsics header based
// on the compiler and target architecture detected via predefined macros.
#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* GCC or clang-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__))
/* Clang-compatible compiler, targeting arm neon */
#include <arm_neon.h>
#elif defined(_MSC_VER)
/* Microsoft C/C++-compatible compiler */
#include <intrin.h>
#if _MSC_VER <= 1900
// VS2015 and older lack the 256-bit extract intrinsics; emulate them by
// extracting the relevant 128-bit lane first.
#define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2))
#define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4))
#define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8))
#define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16))
#endif
#elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__))
/* GCC-compatible compiler, targeting ARM with NEON */
#include <arm_neon.h>
// Some gcc versions lack vld1q/vst1q *_x2 intrinsics; provide fallbacks.
#if defined (MISSING_ARM_VLD1)
#include <ATen/cpu/vec/vec256/missing_vld1_neon.h>
#elif defined (MISSING_ARM_VST1)
#include <ATen/cpu/vec/vec256/missing_vst1_neon.h>
#endif
#elif defined(__GNUC__) && defined(__IWMMXT__)
/* GCC-compatible compiler, targeting ARM with WMMX */
#include <mmintrin.h>
#elif defined(__s390x__)
// targets Z/architecture
// we will include vecintrin later
#elif (defined(__GNUC__) || defined(__xlC__)) && \
      (defined(__VEC__) || defined(__ALTIVEC__))
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
#include <altivec.h>
/* We need to undef those tokens defined by <altivec.h> to avoid conflicts
   with the C++ types. => Can still use __bool/__vector */
#undef bool
#undef vector
#undef pixel
#elif defined(__GNUC__) && defined(__SPE__)
/* GCC-compatible compiler, targeting PowerPC with SPE */
#include <spe.h>
#endif
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 4 |
+
#include <ATen/cpu/vec/vec512/vec512.h>
|
| 5 |
+
#else
|
| 6 |
+
#include <ATen/cpu/vec/vec256/vec256.h>
|
| 7 |
+
#endif
|
| 8 |
+
|
| 9 |
+
namespace at::vec {
|
| 10 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 11 |
+
inline namespace CPU_CAPABILITY {
|
| 12 |
+
|
| 13 |
+
// Convert an int8 vector to a bool vector: any nonzero byte becomes true.
inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) {
  // Materialize the ne(0) mask as bools in an aligned scratch buffer.
  __at_align__ bool buffer[x.size()];
  x.ne(Vectorized<int8_t>(0)).store(buffer);

  Vectorized<bool> ret;
  static_assert(x.size() == ret.size(), "");
  // Raw copy into the bool vector's storage; sizes were checked above.
  std::memcpy(ret, buffer, ret.size() * sizeof(bool));
  return ret;
}
|
| 22 |
+
|
| 23 |
+
// Bool vectors are loaded via int8 and normalized so every true is
// represented consistently (see NOTE [Loading boolean values]).
template <>
inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) {
  // See NOTE [Loading boolean values]
  return convert_to_bool(Vectorized<int8_t>::loadu(ptr));
}

// Partial-load overload: reads only `count` elements.
template <>
inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) {
  // See NOTE [Loading boolean values]
  return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count))
}
|
| 34 |
+
|
| 35 |
+
template <typename VT>
|
| 36 |
+
struct VecHoldType { using hold_type = typename VT::value_type; };
|
| 37 |
+
|
| 38 |
+
template <>
|
| 39 |
+
struct VecHoldType<Vectorized<BFloat16>> { using hold_type = BFloat16; };
|
| 40 |
+
|
| 41 |
+
template <>
|
| 42 |
+
struct VecHoldType<Vectorized<Half>> {using hold_type = Half; };
|
| 43 |
+
|
| 44 |
+
template <typename VT>
|
| 45 |
+
using vechold_type = typename VecHoldType<VT>::hold_type;
|
| 46 |
+
|
| 47 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Workaround for missing vst1q_f32_x2 in gcc-8. */

// Stores the two float32x4 halves of `val` to 8 consecutive floats at `__a`,
// mirroring the ACLE vst1q_f32_x2 intrinsic that gcc-8 does not provide.
// Implemented with an AArch64 "st1" two-register store; %S1/%T1 name the
// consecutive vector registers of the pair holding `val`, and the "=Q"
// output constraint expresses the memory write so the compiler orders it
// correctly.
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
{
  asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h
ADDED
|
@@ -0,0 +1,1174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 11 |
+
#define SLEEF_STATIC_LIBS
|
| 12 |
+
#include <sleef.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
#pragma GCC diagnostic push
|
| 16 |
+
#pragma GCC diagnostic ignored "-Wignored-qualifiers"
|
| 17 |
+
|
| 18 |
+
namespace at::vec {
|
| 19 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 20 |
+
inline namespace CPU_CAPABILITY {
|
| 21 |
+
|
| 22 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 23 |
+
|
| 24 |
+
#ifndef SLEEF_CONST
|
| 25 |
+
#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER)
|
| 26 |
+
#define SLEEF_CONST const
|
| 27 |
+
#else
|
| 28 |
+
#define SLEEF_CONST
|
| 29 |
+
#endif
|
| 30 |
+
#define SLEEF_CONST_OLD SLEEF_CONST
|
| 31 |
+
#else
|
| 32 |
+
#define SLEEF_CONST_OLD
|
| 33 |
+
#endif
|
| 34 |
+
|
| 35 |
+
// bfloat16 conversion
|
| 36 |
+
// bfloat16 conversion
// Converts 8 bf16 values to 8 float32 values: zero-extend each 16-bit lane to
// 32 bits and shift it into the upper half of the word. bf16 is exactly the
// upper 16 bits of an IEEE-754 binary32, so this reinterpretation is lossless.
static inline void cvtbf16_fp32(const __m128i& a, __m256& o) {
  o = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(a), 16));
}
|
| 39 |
+
|
| 40 |
+
static inline void cvtbf16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
|
| 41 |
+
__m128i lo = _mm256_extractf128_si256(a, 0);
|
| 42 |
+
__m128i hi = _mm256_extractf128_si256(a, 1);
|
| 43 |
+
cvtbf16_fp32(lo, o1);
|
| 44 |
+
cvtbf16_fp32(hi, o2);
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// Converts 8 float32 values to 8 bf16 values with round-to-nearest-even,
// implemented via the classic integer bias trick on the raw float bits.
// NaN inputs are forced to the 0xffff bf16 pattern (a quiet NaN).
static inline __m128i cvtfp32_bf16(const __m256& src) {
  __m256i value = _mm256_castps_si256(src);
  __m256i nan = _mm256_set1_epi32(0xffff);
  // mask is all-ones for lanes where src is ordered (i.e. not NaN).
  __m256i mask = _mm256_castps_si256(_mm256_cmp_ps(src, src, _CMP_ORD_Q));
  __m256i ones = _mm256_set1_epi32(0x1);
  __m256i vec_bias = _mm256_set1_epi32(0x7fff);
  // uint32_t lsb = (input >> 16) & 1;
  auto t_value = _mm256_and_si256(_mm256_srli_epi32(value, 16), ones);
  // uint32_t rounding_bias = 0x7fff + lsb;  (ties round to even)
  t_value = _mm256_add_epi32(t_value, vec_bias);
  // input += rounding_bias;
  t_value = _mm256_add_epi32(t_value, value);
  // input = input >> 16;
  t_value = _mm256_srli_epi32(t_value, 16);
  // Check NaN before converting back to bf16
  t_value = _mm256_blendv_epi8(nan, t_value, mask);
  // Pack 32-bit lanes to 16-bit; packus works within 128-bit lanes, so
  // permute afterwards to restore element order before taking the low half.
  t_value = _mm256_packus_epi32(t_value, t_value); // t[4-7] t[4-7] t[0-4] t[0-4]
  t_value = _mm256_permute4x64_epi64(t_value, 0xd8); // 11 01 10 00
  return _mm256_castsi256_si128(t_value);
}
|
| 67 |
+
|
| 68 |
+
// Converts two float32 vectors (16 floats total) to 16 packed bf16 values
// using the same round-to-nearest-even bias trick as the single-vector
// overload; `a` supplies the low 8 output lanes, `b` the high 8.
// NaN inputs are forced to the 0xffff bf16 pattern.
static inline __m256i cvtfp32_bf16(const __m256& a, const __m256& b) {
  __m256i lo = _mm256_castps_si256(a);
  __m256i hi = _mm256_castps_si256(b);
  __m256i nan = _mm256_set1_epi32(0xffff);
  // All-ones for lanes that are ordered (not NaN).
  __m256i mask_lo = _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q));
  __m256i mask_hi = _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_ORD_Q));
  __m256i ones = _mm256_set1_epi32(0x1);
  __m256i vec_bias = _mm256_set1_epi32(0x7fff);
  // uint32_t lsb = (input >> 16) & 1;
  auto t_lo = _mm256_and_si256(_mm256_srli_epi32(lo, 16), ones);
  auto t_hi = _mm256_and_si256(_mm256_srli_epi32(hi, 16), ones);
  // uint32_t rounding_bias = 0x7fff + lsb;  (ties round to even)
  t_lo = _mm256_add_epi32(t_lo, vec_bias);
  t_hi = _mm256_add_epi32(t_hi, vec_bias);
  // input += rounding_bias;
  t_lo = _mm256_add_epi32(t_lo, lo);
  t_hi = _mm256_add_epi32(t_hi, hi);
  // input = input >> 16;
  t_lo = _mm256_srli_epi32(t_lo, 16);
  t_hi = _mm256_srli_epi32(t_hi, 16);
  // Check NaN before converting back to bf16
  t_lo = _mm256_blendv_epi8(nan, t_lo, mask_lo);
  t_hi = _mm256_blendv_epi8(nan, t_hi, mask_hi);

  // packus interleaves within 128-bit lanes; the final permute restores
  // element order across the full 256-bit register.
  t_lo = _mm256_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
  return _mm256_permute4x64_epi64(t_lo, 0xd8); // 11 01 10 00
}
|
| 95 |
+
|
| 96 |
+
// Packs two float32 compare-result vectors (lanes are all-ones or all-zeros)
// into 16-bit lanes by keeping the upper half of each 32-bit lane, then
// fixes the 128-bit lane interleave introduced by the in-lane pack.
static inline __m256i merge_compare_result(const __m256& a, const __m256& b) {
  const __m256i a_bits = _mm256_srli_epi32(_mm256_castps_si256(a), 16);
  const __m256i b_bits = _mm256_srli_epi32(_mm256_castps_si256(b), 16);
  const __m256i packed = _mm256_packus_epi32(a_bits, b_bits);
  return _mm256_permute4x64_epi64(packed, 0xd8);
}
|
| 104 |
+
|
| 105 |
+
// float16 conversion
|
| 106 |
+
// float16 conversion
// Converts 8 fp16 values to 8 float32 values using the F16C hardware
// instruction; the widening conversion is exact.
static inline void cvtfp16_fp32(const __m128i& a, __m256& o) {
  o = _mm256_cvtph_ps(a);
}
|
| 109 |
+
|
| 110 |
+
static inline void cvtfp16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
|
| 111 |
+
__m128i lo = _mm256_extractf128_si256(a, 0);
|
| 112 |
+
__m128i hi = _mm256_extractf128_si256(a, 1);
|
| 113 |
+
cvtfp16_fp32(lo, o1);
|
| 114 |
+
cvtfp16_fp32(hi, o2);
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
// Converts 8 float32 values to 8 fp16 values with round-to-nearest-even,
// without raising floating-point exceptions.
static inline __m128i cvtfp32_fp16(const __m256& src) {
  return _mm256_cvtps_ph(
      src, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
}
|
| 121 |
+
|
| 122 |
+
// Converts two float32 vectors to 16 fp16 values packed into one __m256i:
// `a` fills the low 128 bits, `b` the high 128 bits. Rounds to nearest-even
// without raising floating-point exceptions.
static inline __m256i cvtfp32_fp16(const __m256& a, const __m256& b) {
  const __m128i half_a = _mm256_cvtps_ph(
      a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  const __m128i half_b = _mm256_cvtps_ph(
      b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(half_a), half_b, 1);
}
|
| 129 |
+
|
| 130 |
+
// dtype conversion between float16/bfloat16 and float32
|
| 131 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 132 |
+
inline void cvt_to_fp32(const __m128i& a, __m256& o);
|
| 133 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m128i& a, __m256& o) {
|
| 134 |
+
cvtbf16_fp32(a, o);
|
| 135 |
+
};
|
| 136 |
+
template <> inline void cvt_to_fp32<Half>(const __m128i& a, __m256& o) {
|
| 137 |
+
cvtfp16_fp32(a, o);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 141 |
+
inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2);
|
| 142 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m256& o1, __m256& o2) {
|
| 143 |
+
cvtbf16_fp32(a, o1, o2);
|
| 144 |
+
}
|
| 145 |
+
template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m256& o1, __m256& o2) {
|
| 146 |
+
cvtfp16_fp32(a, o1, o2);
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
// Tag-dispatched conversion from two float32 vectors back to 16 packed
// reduced-precision values. `is_compare_op` selects the raw-bit merge path
// for bf16: compare results are all-ones/all-zeros bit patterns, which the
// rounding bf16 conversion would mangle (all-ones is a NaN pattern).
template <typename T, bool is_compare_op = false,
          typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline __m256i cvt_from_fp32(const __m256& a, const __m256& b);
template <> inline __m256i cvt_from_fp32<BFloat16, false>(const __m256& a, const __m256& b) {
  return cvtfp32_bf16(a, b);
}
template <> inline __m256i cvt_from_fp32<BFloat16, true>(const __m256& a, const __m256& b) {
  return merge_compare_result(a, b);
}
// NOTE(review): Half uses the ordinary fp16 conversion for both flavors —
// presumably the hardware fp32->fp16 conversion maps compare-result bit
// patterns acceptably; confirm against callers if touching this.
template <> inline __m256i cvt_from_fp32<Half, false>(const __m256& a, const __m256& b) {
  return cvtfp32_fp16(a, b);
}
template <> inline __m256i cvt_from_fp32<Half, true>(const __m256& a, const __m256& b) {
  return cvtfp32_fp16(a, b);
}
|
| 164 |
+
|
| 165 |
+
template <typename T>
|
| 166 |
+
class Vectorized16 {
|
| 167 |
+
static_assert(
|
| 168 |
+
is_reduced_floating_point_v<T>,
|
| 169 |
+
"Support only float16 and bfloat16.");
|
| 170 |
+
protected:
|
| 171 |
+
__m256i values;
|
| 172 |
+
public:
|
| 173 |
+
using value_type = uint16_t;
|
| 174 |
+
using size_type = int;
|
| 175 |
+
static constexpr size_type size() {
|
| 176 |
+
return 16;
|
| 177 |
+
}
|
| 178 |
+
Vectorized16() {}
|
| 179 |
+
Vectorized16(__m256i v) : values(v) {}
|
| 180 |
+
Vectorized16(T val) {
|
| 181 |
+
value_type uw = val.x;
|
| 182 |
+
values = _mm256_set1_epi16(uw);
|
| 183 |
+
}
|
| 184 |
+
Vectorized16(T val1, T val2, T val3, T val4,
|
| 185 |
+
T val5, T val6, T val7, T val8,
|
| 186 |
+
T val9, T val10, T val11, T val12,
|
| 187 |
+
T val13, T val14, T val15, T val16) {
|
| 188 |
+
values = _mm256_setr_epi16(
|
| 189 |
+
val1.x, val2.x, val3.x, val4.x, val5.x, val6.x, val7.x, val8.x,
|
| 190 |
+
val9.x, val10.x, val11.x, val12.x, val13.x, val14.x, val15.x, val16.x);
|
| 191 |
+
}
|
| 192 |
+
operator __m256i() const {
|
| 193 |
+
return values;
|
| 194 |
+
}
|
| 195 |
+
T& operator[](int idx) = delete;
|
| 196 |
+
const T& operator[](int idx) const = delete;
|
| 197 |
+
  // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
  // Note: _mm256_movemask_epi8 is byte-granular, so each zero 16-bit lane
  // actually contributes two adjacent set bits to the result.
  int zero_mask() const {
    __m256i cmp = _mm256_cmpeq_epi16(values, _mm256_set1_epi16(0));
    return _mm256_movemask_epi8(cmp);
  }
|
| 202 |
+
static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
|
| 203 |
+
if (count == size())
|
| 204 |
+
return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
|
| 205 |
+
|
| 206 |
+
__at_align__ int16_t tmp_values[size()];
|
| 207 |
+
std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
|
| 208 |
+
return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(tmp_values));
|
| 209 |
+
}
|
| 210 |
+
  // Stores up to size() 16-bit elements to `ptr`. A full store writes the
  // vector directly; a partial store spills to an aligned staging buffer and
  // copies only the first `count` elements, so no bytes past `ptr + count`
  // are written. count <= 0 is a no-op.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
    } else if (count > 0) {
      __at_align__ int16_t tmp_values[size()];
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
    }
  }
|
| 219 |
+
  // Compile-time blend: for each bit i set in `mask`, 16-bit element i of the
  // result comes from `b`; all other elements come from `a`. Implemented via
  // a stack staging buffer because _mm256_extract_epi16 requires an immediate
  // lane index, forcing one branch per mask bit.
  template <int64_t mask>
  static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
    __at_align__ int16_t tmp_values[size()];
    a.store(tmp_values);
    if (mask & 0x01)
      tmp_values[0] = _mm256_extract_epi16(b.values, 0);
    if (mask & 0x02)
      tmp_values[1] = _mm256_extract_epi16(b.values, 1);
    if (mask & 0x04)
      tmp_values[2] = _mm256_extract_epi16(b.values, 2);
    if (mask & 0x08)
      tmp_values[3] = _mm256_extract_epi16(b.values, 3);
    if (mask & 0x10)
      tmp_values[4] = _mm256_extract_epi16(b.values, 4);
    if (mask & 0x20)
      tmp_values[5] = _mm256_extract_epi16(b.values, 5);
    if (mask & 0x40)
      tmp_values[6] = _mm256_extract_epi16(b.values, 6);
    if (mask & 0x80)
      tmp_values[7] = _mm256_extract_epi16(b.values, 7);
    if (mask & 0x100)
      tmp_values[8] = _mm256_extract_epi16(b.values, 8);
    if (mask & 0x200)
      tmp_values[9] = _mm256_extract_epi16(b.values, 9);
    if (mask & 0x400)
      tmp_values[10] = _mm256_extract_epi16(b.values, 10);
    if (mask & 0x800)
      tmp_values[11] = _mm256_extract_epi16(b.values, 11);
    if (mask & 0x1000)
      tmp_values[12] = _mm256_extract_epi16(b.values, 12);
    if (mask & 0x2000)
      tmp_values[13] = _mm256_extract_epi16(b.values, 13);
    if (mask & 0x4000)
      tmp_values[14] = _mm256_extract_epi16(b.values, 14);
    if (mask & 0x8000)
      tmp_values[15] = _mm256_extract_epi16(b.values, 15);
    return loadu(tmp_values);
  }
|
| 257 |
+
  // Runtime blend: where `mask` bytes have their high bit set, take the byte
  // from `b`, otherwise from `a`. Callers are expected to pass lane-wide
  // all-ones/all-zeros masks so whole 16-bit elements are selected together.
  static Vectorized<T> blendv(const Vectorized<T>& a,
      const Vectorized<T>& b, const Vectorized<T>& mask) {
    return _mm256_blendv_epi8(a.values, b.values, mask.values);
  }
|
| 261 |
+
  // Returns {base, base+step, ..., base+15*step}: an arithmetic progression
  // across the 16 lanes. The arithmetic happens in the scalar type T, so the
  // per-lane values are rounded to reduced precision before packing.
  template<typename step_t>
  static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
    return Vectorized<T>(
      base,             base +      step, base +  2 * step, base +  3 * step,
      base +  4 * step, base +  5 * step, base +  6 * step, base +  7 * step,
      base +  8 * step, base +  9 * step, base + 10 * step, base + 11 * step,
      base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
  }
|
| 269 |
+
  // Returns a vector whose first `count` elements come from `b` and whose
  // remaining elements come from `a`. Dispatches to the compile-time blend
  // with the low `count` mask bits set; count >= 16 returns `b` unchanged.
  static Vectorized<T> set(const Vectorized<T>& a,
      const Vectorized<T>& b, int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
      case 4:
        return blend<15>(a, b);
      case 5:
        return blend<31>(a, b);
      case 6:
        return blend<63>(a, b);
      case 7:
        return blend<127>(a, b);
      case 8:
        return blend<255>(a, b);
      case 9:
        return blend<511>(a, b);
      case 10:
        return blend<1023>(a, b);
      case 11:
        return blend<2047>(a, b);
      case 12:
        return blend<4095>(a, b);
      case 13:
        return blend<8191>(a, b);
      case 14:
        return blend<16383>(a, b);
      case 15:
        return blend<32767>(a, b);
    }
    return b;
  }
|
| 307 |
+
|
| 308 |
+
  // Applies a float32 vector function `vop` (typically a Sleef routine) to
  // this vector: widen to two float32 vectors, apply vop to each half, and
  // narrow the results back to reduced precision.
  Vectorized<T> map(SLEEF_CONST __m256 (*SLEEF_CONST_OLD vop)(__m256)) const {
    __m256 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    const auto o1 = vop(lo);
    const auto o2 = vop(hi);
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 315 |
+
  // Per-lane NaN test: an unordered compare against 0 yields all-ones for NaN
  // lanes and all-zeros otherwise; the two float32 compare results are merged
  // back to 16-bit lanes without rounding (raw-bit merge).
  Vectorized<T> isnan() const {
    __m256 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    lo = _mm256_cmp_ps(lo, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
    hi = _mm256_cmp_ps(hi, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
    return merge_compare_result(lo, hi);
  }
|
| 322 |
+
  // Absolute value: clear the sign bit (0x8000) of every 16-bit lane.
  // Works for both bf16 and fp16 since both keep the sign in the top bit.
  Vectorized<T> abs() const {
    return _mm256_andnot_si256(_mm256_set1_epi16(0x8000), values);
  }
|
| 325 |
+
  // Real-valued angle(): pi for negative inputs, 0 for non-negative, NaN
  // propagated. Computed in float32 and narrowed back.
  Vectorized<T> angle() const {
    __m256 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    auto angle_lambda = [](__m256 values_2) {
      const auto zero_vec = _mm256_set1_ps(0.f);
      const auto nan_vec = _mm256_set1_ps(NAN);
      // not_nan_mask: all-ones where the lane equals itself (not NaN);
      // comparing that mask against zero yields the NaN mask.
      const auto not_nan_mask = _mm256_cmp_ps(values_2, values_2, _CMP_EQ_OQ);
      const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
      const auto pi = _mm256_set1_ps(c10::pi<float>);

      const auto neg_mask = _mm256_cmp_ps(values_2, zero_vec, _CMP_LT_OQ);
      auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
      angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
      return angle;
    };
    auto o1 = angle_lambda(lo);
    auto o2 = angle_lambda(hi);
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 344 |
+
Vectorized<T> real() const {
|
| 345 |
+
return *this;
|
| 346 |
+
}
|
| 347 |
+
Vectorized<T> imag() const {
|
| 348 |
+
return _mm256_set1_epi16(0);
|
| 349 |
+
}
|
| 350 |
+
Vectorized<T> conj() const {
|
| 351 |
+
return *this;
|
| 352 |
+
}
|
| 353 |
+
Vectorized<T> acos() const {
|
| 354 |
+
return map(Sleef_acosf8_u10);
|
| 355 |
+
}
|
| 356 |
+
Vectorized<T> acosh() const {
|
| 357 |
+
return map(Sleef_acoshf8_u10);
|
| 358 |
+
}
|
| 359 |
+
Vectorized<T> asin() const {
|
| 360 |
+
return map(Sleef_asinf8_u10);
|
| 361 |
+
}
|
| 362 |
+
Vectorized<T> atan() const {
|
| 363 |
+
return map(Sleef_atanf8_u10);
|
| 364 |
+
}
|
| 365 |
+
Vectorized<T> atanh() const {
|
| 366 |
+
return map(Sleef_atanhf8_u10);
|
| 367 |
+
}
|
| 368 |
+
Vectorized<T> atan2(const Vectorized<T> &b) const {
|
| 369 |
+
__m256 lo, hi;
|
| 370 |
+
__m256 b1, b2;
|
| 371 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 372 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 373 |
+
auto o1 = Sleef_atan2f8_u10(lo, b1);
|
| 374 |
+
auto o2 = Sleef_atan2f8_u10(hi, b2);
|
| 375 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 376 |
+
}
|
| 377 |
+
  // copy sign bit (0x8000) from sign and remaining bits from values
  // Pure bit manipulation on the 16-bit lanes; the 32-bit constants cover two
  // lanes at a time (0x80008000 = sign bits of a lane pair).
  Vectorized<T> copysign(const Vectorized<T> &sign) const {
    __m256i mask_value = _mm256_set1_epi32(~0x80008000);
    __m256i mask_signbit = _mm256_set1_epi32(0x80008000);
    return Vectorized<T>(
      _mm256_or_si256(
        _mm256_and_si256(values, mask_value),
        _mm256_and_si256(sign, mask_signbit)));
  }
|
| 386 |
+
Vectorized<T> erf() const {
|
| 387 |
+
return map(Sleef_erff8_u10);
|
| 388 |
+
}
|
| 389 |
+
Vectorized<T> erfc() const {
|
| 390 |
+
return map(Sleef_erfcf8_u15);
|
| 391 |
+
}
|
| 392 |
+
  // Inverse error function. No vectorized implementation is available, so the
  // float32 halves are spilled to aligned scalar buffers, calc_erfinv is
  // applied element-wise, and the results are reloaded and narrowed.
  Vectorized<T> erfinv() const {
    __m256 lo, hi;
    cvt_to_fp32<T>(values, lo, hi);
    __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
    _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
    _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
    for (int64_t i = 0; i < size() / 2; i++) {
      tmp1[i] = calc_erfinv(tmp1[i]);
      tmp2[i] = calc_erfinv(tmp2[i]);
    }
    auto o1 = _mm256_loadu_ps(tmp1);
    auto o2 = _mm256_loadu_ps(tmp2);
    return cvt_from_fp32<T>(o1, o2);
  }
|
| 406 |
+
Vectorized<T> exp() const {
|
| 407 |
+
return map(Sleef_expf8_u10);
|
| 408 |
+
}
|
| 409 |
+
Vectorized<T> exp2() const {
|
| 410 |
+
return map(Sleef_exp2f8_u10);
|
| 411 |
+
}
|
| 412 |
+
Vectorized<T> expm1() const {
|
| 413 |
+
return map(Sleef_expm1f8_u10);
|
| 414 |
+
}
|
| 415 |
+
Vectorized<T> exp_u20() const {
|
| 416 |
+
return exp();
|
| 417 |
+
}
|
| 418 |
+
Vectorized<T> fmod(const Vectorized<T> & q) const {
|
| 419 |
+
__m256 x_lo, x_hi;
|
| 420 |
+
cvt_to_fp32<T>(values, x_lo, x_hi);
|
| 421 |
+
__m256 q_lo, q_hi;
|
| 422 |
+
cvt_to_fp32<T>(q.values, q_lo, q_hi);
|
| 423 |
+
auto o1 = Sleef_fmodf8(x_lo, q_lo);
|
| 424 |
+
auto o2 = Sleef_fmodf8(x_hi, q_hi);
|
| 425 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 426 |
+
}
|
| 427 |
+
Vectorized<T> hypot(const Vectorized<T> &b) const {
|
| 428 |
+
__m256 lo, hi;
|
| 429 |
+
__m256 b1, b2;
|
| 430 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 431 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 432 |
+
auto o1 = Sleef_hypotf8_u05(lo, b1);
|
| 433 |
+
auto o2 = Sleef_hypotf8_u05(hi, b2);
|
| 434 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 435 |
+
}
|
| 436 |
+
Vectorized<T> i0() const {
|
| 437 |
+
__m256 lo, hi;
|
| 438 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 439 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 440 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 441 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 442 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
| 443 |
+
tmp1[i] = calc_i0(tmp1[i]);
|
| 444 |
+
tmp2[i] = calc_i0(tmp2[i]);
|
| 445 |
+
}
|
| 446 |
+
auto o1 = _mm256_loadu_ps(tmp1);
|
| 447 |
+
auto o2 = _mm256_loadu_ps(tmp2);
|
| 448 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 449 |
+
}
|
| 450 |
+
Vectorized<T> i0e() const {
|
| 451 |
+
__m256 lo, hi;
|
| 452 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 453 |
+
constexpr auto sz = size();
|
| 454 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
| 455 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 456 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 457 |
+
|
| 458 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
| 459 |
+
tmp1[i] = calc_i0e(tmp1[i]);
|
| 460 |
+
tmp2[i] = calc_i0e(tmp2[i]);
|
| 461 |
+
}
|
| 462 |
+
const auto o1 = _mm256_loadu_ps(tmp1);
|
| 463 |
+
const auto o2 = _mm256_loadu_ps(tmp2);
|
| 464 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 465 |
+
}
|
| 466 |
+
Vectorized<T> digamma() const {
|
| 467 |
+
__m256 lo, hi;
|
| 468 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 469 |
+
constexpr auto sz = size();
|
| 470 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
| 471 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 472 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 473 |
+
|
| 474 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
| 475 |
+
tmp1[i] = calc_digamma(tmp1[i]);
|
| 476 |
+
tmp2[i] = calc_digamma(tmp2[i]);
|
| 477 |
+
}
|
| 478 |
+
const auto o1 = _mm256_loadu_ps(tmp1);
|
| 479 |
+
const auto o2 = _mm256_loadu_ps(tmp2);
|
| 480 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 481 |
+
}
|
| 482 |
+
Vectorized<T> igamma(const Vectorized<T> &x) const {
|
| 483 |
+
__m256 lo, hi;
|
| 484 |
+
__m256 xlo, xhi;
|
| 485 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 486 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
| 487 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 488 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 489 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 490 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
| 491 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
| 492 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
| 493 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
| 494 |
+
tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
|
| 495 |
+
tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
|
| 496 |
+
}
|
| 497 |
+
auto o1 = _mm256_loadu_ps(tmp1);
|
| 498 |
+
auto o2 = _mm256_loadu_ps(tmp2);
|
| 499 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 500 |
+
}
|
| 501 |
+
|
| 502 |
+
Vectorized<T> igammac(const Vectorized<T> &x) const {
|
| 503 |
+
__m256 lo, hi;
|
| 504 |
+
__m256 xlo, xhi;
|
| 505 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 506 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
| 507 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 508 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 509 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 510 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
| 511 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
| 512 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
| 513 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
| 514 |
+
tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
|
| 515 |
+
tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
|
| 516 |
+
}
|
| 517 |
+
auto o1 = _mm256_loadu_ps(tmp1);
|
| 518 |
+
auto o2 = _mm256_loadu_ps(tmp2);
|
| 519 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 520 |
+
}
|
| 521 |
+
Vectorized<T> log() const {
|
| 522 |
+
return map(Sleef_logf8_u10);
|
| 523 |
+
}
|
| 524 |
+
Vectorized<T> log2() const {
|
| 525 |
+
return map(Sleef_log2f8_u10);
|
| 526 |
+
}
|
| 527 |
+
Vectorized<T> log10() const {
|
| 528 |
+
return map(Sleef_log10f8_u10);
|
| 529 |
+
}
|
| 530 |
+
Vectorized<T> log1p() const {
|
| 531 |
+
return map(Sleef_log1pf8_u10);
|
| 532 |
+
}
|
| 533 |
+
Vectorized<T> sin() const {
|
| 534 |
+
return map(Sleef_sinf8_u10);
|
| 535 |
+
}
|
| 536 |
+
Vectorized<T> sinh() const {
|
| 537 |
+
return map(Sleef_sinhf8_u10);
|
| 538 |
+
}
|
| 539 |
+
Vectorized<T> cos() const {
|
| 540 |
+
return map(Sleef_cosf8_u10);
|
| 541 |
+
}
|
| 542 |
+
Vectorized<T> cosh() const {
|
| 543 |
+
return map(Sleef_coshf8_u10);
|
| 544 |
+
}
|
| 545 |
+
Vectorized<T> ceil() const {
|
| 546 |
+
__m256 lo, hi;
|
| 547 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 548 |
+
auto o1 = _mm256_ceil_ps(lo);
|
| 549 |
+
auto o2 = _mm256_ceil_ps(hi);
|
| 550 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 551 |
+
}
|
| 552 |
+
Vectorized<T> floor() const {
|
| 553 |
+
__m256 lo, hi;
|
| 554 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 555 |
+
auto o1 = _mm256_floor_ps(lo);
|
| 556 |
+
auto o2 = _mm256_floor_ps(hi);
|
| 557 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 558 |
+
}
|
| 559 |
+
Vectorized<T> neg() const {
|
| 560 |
+
return _mm256_xor_si256(values, _mm256_set1_epi16(0x8000));
|
| 561 |
+
}
|
| 562 |
+
Vectorized<T> round() const {
|
| 563 |
+
__m256 lo, hi;
|
| 564 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 565 |
+
auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 566 |
+
auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 567 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 568 |
+
}
|
| 569 |
+
Vectorized<T> tan() const {
|
| 570 |
+
return map(Sleef_tanf8_u10);
|
| 571 |
+
}
|
| 572 |
+
Vectorized<T> tanh() const {
|
| 573 |
+
return map(Sleef_tanhf8_u10);
|
| 574 |
+
}
|
| 575 |
+
Vectorized<T> trunc() const {
|
| 576 |
+
__m256 lo, hi;
|
| 577 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 578 |
+
auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
| 579 |
+
auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
|
| 580 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 581 |
+
}
|
| 582 |
+
Vectorized<T> lgamma() const {
|
| 583 |
+
return map(Sleef_lgammaf8_u10);
|
| 584 |
+
}
|
| 585 |
+
Vectorized<T> sqrt() const {
|
| 586 |
+
__m256 lo, hi;
|
| 587 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 588 |
+
auto o1 = _mm256_sqrt_ps(lo);
|
| 589 |
+
auto o2 = _mm256_sqrt_ps(hi);
|
| 590 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 591 |
+
}
|
| 592 |
+
Vectorized<T> reciprocal() const {
|
| 593 |
+
__m256 lo, hi;
|
| 594 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 595 |
+
auto ones = _mm256_set1_ps(1);
|
| 596 |
+
auto o1 = _mm256_div_ps(ones, lo);
|
| 597 |
+
auto o2 = _mm256_div_ps(ones, hi);
|
| 598 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 599 |
+
}
|
| 600 |
+
Vectorized<T> rsqrt() const {
|
| 601 |
+
__m256 lo, hi;
|
| 602 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 603 |
+
auto ones = _mm256_set1_ps(1);
|
| 604 |
+
auto o1 = _mm256_div_ps(ones, _mm256_sqrt_ps(lo));
|
| 605 |
+
auto o2 = _mm256_div_ps(ones, _mm256_sqrt_ps(hi));
|
| 606 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 607 |
+
}
|
| 608 |
+
Vectorized<T> pow(const Vectorized<T> &b) const {
|
| 609 |
+
__m256 lo, hi;
|
| 610 |
+
__m256 b1, b2;
|
| 611 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 612 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 613 |
+
auto o1 = Sleef_powf8_u10(lo, b1);
|
| 614 |
+
auto o2 = Sleef_powf8_u10(hi, b2);
|
| 615 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 616 |
+
}
|
| 617 |
+
private:
|
| 618 |
+
template<typename Op>
|
| 619 |
+
Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
|
| 620 |
+
__m256 a_lo, a_hi;
|
| 621 |
+
__m256 b_lo, b_hi;
|
| 622 |
+
cvt_to_fp32<T>(values, a_lo, a_hi);
|
| 623 |
+
cvt_to_fp32<T>(b.values, b_lo, b_hi);
|
| 624 |
+
auto o1 = op(a_lo, b_lo);
|
| 625 |
+
auto o2 = op(a_hi, b_hi);
|
| 626 |
+
return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
|
| 627 |
+
}
|
| 628 |
+
|
| 629 |
+
public:
|
| 630 |
+
Vectorized<T> inline operator>(const Vectorized<T>& other) const {
|
| 631 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GT_OQ); });
|
| 632 |
+
}
|
| 633 |
+
Vectorized<T> inline operator<(const Vectorized<T>& other) const {
|
| 634 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LT_OQ); });
|
| 635 |
+
}
|
| 636 |
+
Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
|
| 637 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GE_OQ); });
|
| 638 |
+
}
|
| 639 |
+
Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
|
| 640 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LE_OQ); });
|
| 641 |
+
}
|
| 642 |
+
Vectorized<T> inline operator==(const Vectorized<T>& other) const {
|
| 643 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_EQ_OQ); });
|
| 644 |
+
}
|
| 645 |
+
Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
|
| 646 |
+
return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_NEQ_UQ); });
|
| 647 |
+
}
|
| 648 |
+
};
|
| 649 |
+
|
| 650 |
+
template<typename T, typename Op>
|
| 651 |
+
static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
|
| 652 |
+
__m256 a_lo, a_hi;
|
| 653 |
+
__m256 b_lo, b_hi;
|
| 654 |
+
cvt_to_fp32<T>(__m256i(a), a_lo, a_hi);
|
| 655 |
+
cvt_to_fp32<T>(__m256i(b), b_lo, b_hi);
|
| 656 |
+
auto o1 = op(a_lo, b_lo);
|
| 657 |
+
auto o2 = op(a_hi, b_hi);
|
| 658 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 659 |
+
}
|
| 660 |
+
|
| 661 |
+
template <>
|
| 662 |
+
class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
|
| 663 |
+
public:
|
| 664 |
+
using Vectorized16::Vectorized16;
|
| 665 |
+
|
| 666 |
+
Vectorized<BFloat16> frac() const;
|
| 667 |
+
|
| 668 |
+
Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
|
| 669 |
+
Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
|
| 670 |
+
Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
|
| 671 |
+
Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
|
| 672 |
+
Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
|
| 673 |
+
Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
|
| 674 |
+
};
|
| 675 |
+
|
| 676 |
+
Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 677 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
|
| 678 |
+
}
|
| 679 |
+
Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 680 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
|
| 681 |
+
}
|
| 682 |
+
Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 683 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
|
| 684 |
+
}
|
| 685 |
+
Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 686 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
|
| 687 |
+
}
|
| 688 |
+
Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 689 |
+
return _mm256_and_si256(a, b);
|
| 690 |
+
}
|
| 691 |
+
Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 692 |
+
return _mm256_or_si256(a, b);
|
| 693 |
+
}
|
| 694 |
+
Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 695 |
+
return _mm256_xor_si256(a, b);
|
| 696 |
+
}
|
| 697 |
+
|
| 698 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
|
| 699 |
+
return (*this == other) & Vectorized<BFloat16>(1.0f);
|
| 700 |
+
}
|
| 701 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
|
| 702 |
+
return (*this != other) & Vectorized<BFloat16>(1.0f);
|
| 703 |
+
}
|
| 704 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
|
| 705 |
+
return (*this > other) & Vectorized<BFloat16>(1.0f);
|
| 706 |
+
}
|
| 707 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
|
| 708 |
+
return (*this >= other) & Vectorized<BFloat16>(1.0f);
|
| 709 |
+
}
|
| 710 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
|
| 711 |
+
return (*this < other) & Vectorized<BFloat16>(1.0f);
|
| 712 |
+
}
|
| 713 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
|
| 714 |
+
return (*this <= other) & Vectorized<BFloat16>(1.0f);
|
| 715 |
+
}
|
| 716 |
+
|
| 717 |
+
// frac. Implement this here so we can use subtraction
|
| 718 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
|
| 719 |
+
return *this - this->trunc();
|
| 720 |
+
}
|
| 721 |
+
|
| 722 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 723 |
+
// either input is a NaN.
|
| 724 |
+
template <>
|
| 725 |
+
Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 726 |
+
__m256 a_lo, a_hi;
|
| 727 |
+
__m256 b_lo, b_hi;
|
| 728 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
| 729 |
+
cvtbf16_fp32(__m256i(b), b_lo, b_hi);
|
| 730 |
+
auto max_lo = _mm256_max_ps(a_lo, b_lo);
|
| 731 |
+
auto max_hi = _mm256_max_ps(a_hi, b_hi);
|
| 732 |
+
auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
|
| 733 |
+
auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
|
| 734 |
+
// Exploit the fact that all-ones is a NaN.
|
| 735 |
+
auto o1 = _mm256_or_ps(max_lo, nan_lo);
|
| 736 |
+
auto o2 = _mm256_or_ps(max_hi, nan_hi);
|
| 737 |
+
return cvtfp32_bf16(o1, o2);
|
| 738 |
+
}
|
| 739 |
+
|
| 740 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
| 741 |
+
// either input is a NaN.
|
| 742 |
+
template <>
|
| 743 |
+
Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 744 |
+
__m256 a_lo, a_hi;
|
| 745 |
+
__m256 b_lo, b_hi;
|
| 746 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
| 747 |
+
cvtbf16_fp32(__m256i(b), b_lo, b_hi);
|
| 748 |
+
auto min_lo = _mm256_min_ps(a_lo, b_lo);
|
| 749 |
+
auto min_hi = _mm256_min_ps(a_hi, b_hi);
|
| 750 |
+
auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
|
| 751 |
+
auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
|
| 752 |
+
// Exploit the fact that all-ones is a NaN.
|
| 753 |
+
auto o1 = _mm256_or_ps(min_lo, nan_lo);
|
| 754 |
+
auto o2 = _mm256_or_ps(min_hi, nan_hi);
|
| 755 |
+
return cvtfp32_bf16(o1, o2);
|
| 756 |
+
}
|
| 757 |
+
|
| 758 |
+
template <>
|
| 759 |
+
Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
|
| 760 |
+
const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
|
| 761 |
+
__m256 a_lo, a_hi;
|
| 762 |
+
__m256 min_lo, min_hi;
|
| 763 |
+
__m256 max_lo, max_hi;
|
| 764 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
| 765 |
+
cvtbf16_fp32(__m256i(min), min_lo, min_hi);
|
| 766 |
+
cvtbf16_fp32(__m256i(max), max_lo, max_hi);
|
| 767 |
+
auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
|
| 768 |
+
auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
|
| 769 |
+
return cvtfp32_bf16(o1, o2);
|
| 770 |
+
}
|
| 771 |
+
|
| 772 |
+
template <>
|
| 773 |
+
Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
|
| 774 |
+
__m256 a_lo, a_hi;
|
| 775 |
+
__m256 max_lo, max_hi;
|
| 776 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
| 777 |
+
cvtbf16_fp32(__m256i(max), max_lo, max_hi);
|
| 778 |
+
auto o1 = _mm256_min_ps(max_lo, a_lo);
|
| 779 |
+
auto o2 = _mm256_min_ps(max_hi, a_hi);
|
| 780 |
+
return cvtfp32_bf16(o1, o2);
|
| 781 |
+
}
|
| 782 |
+
|
| 783 |
+
template <>
|
| 784 |
+
Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
|
| 785 |
+
__m256 a_lo, a_hi;
|
| 786 |
+
__m256 min_lo, min_hi;
|
| 787 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
| 788 |
+
cvtbf16_fp32(__m256i(min), min_lo, min_hi);
|
| 789 |
+
auto o1 = _mm256_max_ps(min_lo, a_lo);
|
| 790 |
+
auto o2 = _mm256_max_ps(min_hi, a_hi);
|
| 791 |
+
return cvtfp32_bf16(o1, o2);
|
| 792 |
+
}
|
| 793 |
+
|
| 794 |
+
template <>
|
| 795 |
+
inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
|
| 796 |
+
int64_t i;
|
| 797 |
+
#pragma unroll
|
| 798 |
+
for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
|
| 799 |
+
auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
|
| 800 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
|
| 801 |
+
}
|
| 802 |
+
#pragma unroll
|
| 803 |
+
for (; i < n; i++) {
|
| 804 |
+
dst[i] = src[i];
|
| 805 |
+
}
|
| 806 |
+
}
|
| 807 |
+
|
| 808 |
+
template <>
|
| 809 |
+
inline void convert(const float* src, BFloat16* dst, int64_t n) {
|
| 810 |
+
int64_t i;
|
| 811 |
+
for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
|
| 812 |
+
__m256 a = _mm256_loadu_ps(&src[i]);
|
| 813 |
+
__m256 b = _mm256_loadu_ps(&src[i + 8]);
|
| 814 |
+
|
| 815 |
+
__m256i bf = cvtfp32_bf16(a, b);
|
| 816 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
|
| 817 |
+
}
|
| 818 |
+
for (; i < n; i++) {
|
| 819 |
+
dst[i] = c10::convert<BFloat16>(src[i]);
|
| 820 |
+
}
|
| 821 |
+
}
|
| 822 |
+
|
| 823 |
+
template <>
|
| 824 |
+
inline void convert(const double* src, BFloat16* dst, int64_t n) {
|
| 825 |
+
auto load_float = [](const double *src) -> __m256 {
|
| 826 |
+
// Load one float vector from an array of doubles
|
| 827 |
+
__m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
|
| 828 |
+
__m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
|
| 829 |
+
return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
|
| 830 |
+
};
|
| 831 |
+
|
| 832 |
+
int64_t i;
|
| 833 |
+
for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
|
| 834 |
+
__m256 a = load_float(&src[i]);
|
| 835 |
+
__m256 b = load_float(&src[i + 8]);
|
| 836 |
+
|
| 837 |
+
__m256i bf = cvtfp32_bf16(a, b);
|
| 838 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
|
| 839 |
+
}
|
| 840 |
+
for (; i < n; i++) {
|
| 841 |
+
dst[i] = c10::convert<BFloat16>(src[i]);
|
| 842 |
+
}
|
| 843 |
+
}
|
| 844 |
+
|
| 845 |
+
template <>
|
| 846 |
+
Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
|
| 847 |
+
const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
|
| 848 |
+
__m256 a_lo, a_hi;
|
| 849 |
+
__m256 b_lo, b_hi;
|
| 850 |
+
__m256 c_lo, c_hi;
|
| 851 |
+
cvtbf16_fp32(__m256i(a), a_lo, a_hi);
|
| 852 |
+
cvtbf16_fp32(__m256i(b), b_lo, b_hi);
|
| 853 |
+
cvtbf16_fp32(__m256i(c), c_lo, c_hi);
|
| 854 |
+
auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
|
| 855 |
+
auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
|
| 856 |
+
return cvtfp32_bf16(o1, o2);
|
| 857 |
+
}
|
| 858 |
+
|
| 859 |
+
template <>
|
| 860 |
+
class Vectorized<Half>: public Vectorized16<Half> {
|
| 861 |
+
public:
|
| 862 |
+
using Vectorized16::Vectorized16;
|
| 863 |
+
|
| 864 |
+
Vectorized<Half> frac() const;
|
| 865 |
+
|
| 866 |
+
Vectorized<Half> eq(const Vectorized<Half>& other) const;
|
| 867 |
+
Vectorized<Half> ne(const Vectorized<Half>& other) const;
|
| 868 |
+
Vectorized<Half> gt(const Vectorized<Half>& other) const;
|
| 869 |
+
Vectorized<Half> ge(const Vectorized<Half>& other) const;
|
| 870 |
+
Vectorized<Half> lt(const Vectorized<Half>& other) const;
|
| 871 |
+
Vectorized<Half> le(const Vectorized<Half>& other) const;
|
| 872 |
+
};
|
| 873 |
+
|
| 874 |
+
Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 875 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
|
| 876 |
+
}
|
| 877 |
+
Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 878 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
|
| 879 |
+
}
|
| 880 |
+
Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 881 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
|
| 882 |
+
}
|
| 883 |
+
Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 884 |
+
return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
|
| 885 |
+
}
|
| 886 |
+
Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 887 |
+
return _mm256_and_si256(a, b);
|
| 888 |
+
}
|
| 889 |
+
Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 890 |
+
return _mm256_or_si256(a, b);
|
| 891 |
+
}
|
| 892 |
+
Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 893 |
+
return _mm256_xor_si256(a, b);
|
| 894 |
+
}
|
| 895 |
+
|
| 896 |
+
inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
|
| 897 |
+
return (*this == other) & Vectorized<Half>(1.0f);
|
| 898 |
+
}
|
| 899 |
+
inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
|
| 900 |
+
return (*this != other) & Vectorized<Half>(1.0f);
|
| 901 |
+
}
|
| 902 |
+
inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
|
| 903 |
+
return (*this > other) & Vectorized<Half>(1.0f);
|
| 904 |
+
}
|
| 905 |
+
inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
|
| 906 |
+
return (*this >= other) & Vectorized<Half>(1.0f);
|
| 907 |
+
}
|
| 908 |
+
inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
|
| 909 |
+
return (*this < other) & Vectorized<Half>(1.0f);
|
| 910 |
+
}
|
| 911 |
+
inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
|
| 912 |
+
return (*this <= other) & Vectorized<Half>(1.0f);
|
| 913 |
+
}
|
| 914 |
+
|
| 915 |
+
// frac. Implement this here so we can use subtraction
|
| 916 |
+
inline Vectorized<Half> Vectorized<Half>::frac() const {
|
| 917 |
+
return *this - this->trunc();
|
| 918 |
+
}
|
| 919 |
+
|
| 920 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 921 |
+
// either input is a NaN.
|
| 922 |
+
template <>
|
| 923 |
+
Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 924 |
+
__m256 a_lo, a_hi;
|
| 925 |
+
__m256 b_lo, b_hi;
|
| 926 |
+
cvtfp16_fp32(__m256i(a), a_lo, a_hi);
|
| 927 |
+
cvtfp16_fp32(__m256i(b), b_lo, b_hi);
|
| 928 |
+
auto max_lo = _mm256_max_ps(a_lo, b_lo);
|
| 929 |
+
auto max_hi = _mm256_max_ps(a_hi, b_hi);
|
| 930 |
+
auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
|
| 931 |
+
auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
|
| 932 |
+
// Exploit the fact that all-ones is a NaN.
|
| 933 |
+
auto o1 = _mm256_or_ps(max_lo, nan_lo);
|
| 934 |
+
auto o2 = _mm256_or_ps(max_hi, nan_hi);
|
| 935 |
+
return cvtfp32_fp16(o1, o2);
|
| 936 |
+
}
|
| 937 |
+
|
| 938 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
| 939 |
+
// either input is a NaN.
|
| 940 |
+
template <>
|
| 941 |
+
Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 942 |
+
__m256 a_lo, a_hi;
|
| 943 |
+
__m256 b_lo, b_hi;
|
| 944 |
+
cvtfp16_fp32(__m256i(a), a_lo, a_hi);
|
| 945 |
+
cvtfp16_fp32(__m256i(b), b_lo, b_hi);
|
| 946 |
+
auto min_lo = _mm256_min_ps(a_lo, b_lo);
|
| 947 |
+
auto min_hi = _mm256_min_ps(a_hi, b_hi);
|
| 948 |
+
auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
|
| 949 |
+
auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
|
| 950 |
+
// Exploit the fact that all-ones is a NaN.
|
| 951 |
+
auto o1 = _mm256_or_ps(min_lo, nan_lo);
|
| 952 |
+
auto o2 = _mm256_or_ps(min_hi, nan_hi);
|
| 953 |
+
return cvtfp32_fp16(o1, o2);
|
| 954 |
+
}
|
| 955 |
+
|
| 956 |
+
template <>
|
| 957 |
+
Vectorized<Half> inline clamp(const Vectorized<Half>& a,
|
| 958 |
+
const Vectorized<Half>& min, const Vectorized<Half>& max) {
|
| 959 |
+
__m256 a_lo, a_hi;
|
| 960 |
+
__m256 min_lo, min_hi;
|
| 961 |
+
__m256 max_lo, max_hi;
|
| 962 |
+
cvtfp16_fp32(__m256i(a), a_lo, a_hi);
|
| 963 |
+
cvtfp16_fp32(__m256i(min), min_lo, min_hi);
|
| 964 |
+
cvtfp16_fp32(__m256i(max), max_lo, max_hi);
|
| 965 |
+
auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
|
| 966 |
+
auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
|
| 967 |
+
return cvtfp32_fp16(o1, o2);
|
| 968 |
+
}
|
| 969 |
+
|
| 970 |
+
template <>
|
| 971 |
+
Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
|
| 972 |
+
__m256 a_lo, a_hi;
|
| 973 |
+
__m256 max_lo, max_hi;
|
| 974 |
+
cvtfp16_fp32(__m256i(a), a_lo, a_hi);
|
| 975 |
+
cvtfp16_fp32(__m256i(max), max_lo, max_hi);
|
| 976 |
+
auto o1 = _mm256_min_ps(max_lo, a_lo);
|
| 977 |
+
auto o2 = _mm256_min_ps(max_hi, a_hi);
|
| 978 |
+
return cvtfp32_fp16(o1, o2);
|
| 979 |
+
}
|
| 980 |
+
|
| 981 |
+
template <>
|
| 982 |
+
Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
|
| 983 |
+
__m256 a_lo, a_hi;
|
| 984 |
+
__m256 min_lo, min_hi;
|
| 985 |
+
cvtfp16_fp32(__m256i(a), a_lo, a_hi);
|
| 986 |
+
cvtfp16_fp32(__m256i(min), min_lo, min_hi);
|
| 987 |
+
auto o1 = _mm256_max_ps(min_lo, a_lo);
|
| 988 |
+
auto o2 = _mm256_max_ps(min_hi, a_hi);
|
| 989 |
+
return cvtfp32_fp16(o1, o2);
|
| 990 |
+
}
|
| 991 |
+
|
| 992 |
+
template <>
|
| 993 |
+
inline void convert(const Half* src, Half* dst, int64_t n) {
|
| 994 |
+
int64_t i;
|
| 995 |
+
#pragma unroll
|
| 996 |
+
for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
|
| 997 |
+
auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
|
| 998 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
|
| 999 |
+
}
|
| 1000 |
+
#pragma unroll
|
| 1001 |
+
for (; i < n; i++) {
|
| 1002 |
+
dst[i] = src[i];
|
| 1003 |
+
}
|
| 1004 |
+
}
|
| 1005 |
+
|
| 1006 |
+
template <>
|
| 1007 |
+
inline void convert(const float* src, Half* dst, int64_t n) {
|
| 1008 |
+
int64_t i;
|
| 1009 |
+
for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
|
| 1010 |
+
__m256 a = _mm256_loadu_ps(&src[i]);
|
| 1011 |
+
__m256 b = _mm256_loadu_ps(&src[i + 8]);
|
| 1012 |
+
|
| 1013 |
+
__m256i c = cvtfp32_fp16(a, b);
|
| 1014 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
|
| 1015 |
+
}
|
| 1016 |
+
for (; i < n; i++) {
|
| 1017 |
+
dst[i] = c10::convert<Half>(src[i]);
|
| 1018 |
+
}
|
| 1019 |
+
}
|
| 1020 |
+
|
| 1021 |
+
template <>
|
| 1022 |
+
inline void convert(const double* src, Half* dst, int64_t n) {
|
| 1023 |
+
auto load_float = [](const double *src) -> __m256 {
|
| 1024 |
+
// Load one float vector from an array of doubles
|
| 1025 |
+
__m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
|
| 1026 |
+
__m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
|
| 1027 |
+
return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
|
| 1028 |
+
};
|
| 1029 |
+
|
| 1030 |
+
int64_t i;
|
| 1031 |
+
for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
|
| 1032 |
+
__m256 a = load_float(&src[i]);
|
| 1033 |
+
__m256 b = load_float(&src[i + 8]);
|
| 1034 |
+
|
| 1035 |
+
__m256i c = cvtfp32_fp16(a, b);
|
| 1036 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
|
| 1037 |
+
}
|
| 1038 |
+
for (; i < n; i++) {
|
| 1039 |
+
dst[i] = c10::convert<Half>(src[i]);
|
| 1040 |
+
}
|
| 1041 |
+
}
|
| 1042 |
+
|
| 1043 |
+
template <>
|
| 1044 |
+
Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
|
| 1045 |
+
const Vectorized<Half>& b, const Vectorized<Half>& c) {
|
| 1046 |
+
__m256 a_lo, a_hi;
|
| 1047 |
+
__m256 b_lo, b_hi;
|
| 1048 |
+
__m256 c_lo, c_hi;
|
| 1049 |
+
cvtfp16_fp32(__m256i(a), a_lo, a_hi);
|
| 1050 |
+
cvtfp16_fp32(__m256i(b), b_lo, b_hi);
|
| 1051 |
+
cvtfp16_fp32(__m256i(c), c_lo, c_hi);
|
| 1052 |
+
auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
|
| 1053 |
+
auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
|
| 1054 |
+
return cvtfp32_fp16(o1, o2);
|
| 1055 |
+
}
|
| 1056 |
+
|
| 1057 |
+
#define CONVERT_VECTORIZED_INIT(type, name) \
|
| 1058 |
+
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
|
| 1059 |
+
__m256 o1, o2; \
|
| 1060 |
+
cvt_to_fp32<type>(__m256i(a), o1, o2); \
|
| 1061 |
+
return std::make_tuple(o1, o2); \
|
| 1062 |
+
} \
|
| 1063 |
+
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
|
| 1064 |
+
return cvt_from_fp32<type>(__m256(a), __m256(b)); \
|
| 1065 |
+
}
|
| 1066 |
+
CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
|
| 1067 |
+
CONVERT_VECTORIZED_INIT(Half, half);
|
| 1068 |
+
|
| 1069 |
+
#else // defined(CPU_CAPABILITY_AVX2)
|
| 1070 |
+
|
| 1071 |
+
#define CONVERT_NON_VECTORIZED_INIT(type, name) \
|
| 1072 |
+
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
|
| 1073 |
+
constexpr int64_t K = Vectorized<type>::size(); \
|
| 1074 |
+
__at_align__ float arr[K]; \
|
| 1075 |
+
__at_align__ type arr2[K]; \
|
| 1076 |
+
a.store(arr2); \
|
| 1077 |
+
convert(arr2, arr, K); \
|
| 1078 |
+
return std::make_tuple( \
|
| 1079 |
+
Vectorized<float>::loadu(arr), \
|
| 1080 |
+
Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
|
| 1081 |
+
} \
|
| 1082 |
+
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
|
| 1083 |
+
constexpr int64_t K = Vectorized<type>::size(); \
|
| 1084 |
+
__at_align__ float arr[K]; \
|
| 1085 |
+
__at_align__ type arr2[K]; \
|
| 1086 |
+
a.store(arr); \
|
| 1087 |
+
b.store(arr + Vectorized<float>::size()); \
|
| 1088 |
+
convert(arr, arr2, K); \
|
| 1089 |
+
return Vectorized<type>::loadu(arr2); \
|
| 1090 |
+
}
|
| 1091 |
+
CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
|
| 1092 |
+
#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)
|
| 1093 |
+
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float(const Vectorized<Half>& a) {
|
| 1094 |
+
static_assert(Vectorized<Half>::size() == 2 * Vectorized<float>::size());
|
| 1095 |
+
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
|
| 1096 |
+
float16x8x2_t arr = a;
|
| 1097 |
+
float16x8_t x = arr.val[0];
|
| 1098 |
+
float16x8_t y = arr.val[1];
|
| 1099 |
+
#else
|
| 1100 |
+
auto arr = reinterpret_cast<const float16_t*>(a.operator const Half*());
|
| 1101 |
+
float16x8_t x = vld1q_f16(arr);
|
| 1102 |
+
float16x8_t y = vld1q_f16(arr + Vectorized<float>::size());
|
| 1103 |
+
#endif
|
| 1104 |
+
float32x4_t x1 = vcvt_f32_f16(vget_low_f16(x));
|
| 1105 |
+
float32x4_t x2 = vcvt_f32_f16(vget_high_f16(x));
|
| 1106 |
+
float32x4_t y1 = vcvt_f32_f16(vget_low_f16(y));
|
| 1107 |
+
float32x4_t y2 = vcvt_f32_f16(vget_high_f16(y));
|
| 1108 |
+
return { Vectorized<float>(x1, x2), Vectorized<float>(y1, y2) };
|
| 1109 |
+
}
|
| 1110 |
+
inline Vectorized<Half> convert_float_half(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 1111 |
+
static_assert(Vectorized<Half>::size() == 2 * Vectorized<float>::size());
|
| 1112 |
+
float32x4x2_t x = a;
|
| 1113 |
+
float32x4x2_t y = b;
|
| 1114 |
+
float16x4_t x1 = vcvt_f16_f32(x.val[0]);
|
| 1115 |
+
float16x4_t x2 = vcvt_f16_f32(x.val[1]);
|
| 1116 |
+
float16x4_t y1 = vcvt_f16_f32(y.val[0]);
|
| 1117 |
+
float16x4_t y2 = vcvt_f16_f32(y.val[1]);
|
| 1118 |
+
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
|
| 1119 |
+
return Vectorized<Half>(vcombine_f16(x1, x2), vcombine_f16(y1, y2));
|
| 1120 |
+
#else
|
| 1121 |
+
Vectorized<Half> rc;
|
| 1122 |
+
auto arr = reinterpret_cast<float16_t*>(rc.operator Half*());
|
| 1123 |
+
vst1q_f16(arr, vcombine_f16(x1, x2));
|
| 1124 |
+
vst1q_f16(arr + Vectorized<float>::size(), vcombine_f16(y1, y2));
|
| 1125 |
+
return rc;
|
| 1126 |
+
#endif
|
| 1127 |
+
}
|
| 1128 |
+
#else
|
| 1129 |
+
CONVERT_NON_VECTORIZED_INIT(Half, half);
|
| 1130 |
+
#endif
|
| 1131 |
+
|
| 1132 |
+
#endif // defined(CPU_CAPABILITY_AVX2)
|
| 1133 |
+
|
| 1134 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 1135 |
+
#define LOAD_FP32_VECTORIZED_INIT(type, name) \
|
| 1136 |
+
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
|
| 1137 |
+
auto values = _mm_loadu_si128(reinterpret_cast<const __m128i*>(data)); \
|
| 1138 |
+
__m256 out_values; \
|
| 1139 |
+
cvt_to_fp32<type>(values, out_values); \
|
| 1140 |
+
out = out_values; \
|
| 1141 |
+
} \
|
| 1142 |
+
\
|
| 1143 |
+
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
|
| 1144 |
+
auto vec = Vectorized<type>::loadu(data); \
|
| 1145 |
+
__m256 out1_values, out2_values; \
|
| 1146 |
+
cvt_to_fp32<type>(vec, out1_values, out2_values); \
|
| 1147 |
+
out1 = out1_values; \
|
| 1148 |
+
out2 = out2_values; \
|
| 1149 |
+
}
|
| 1150 |
+
LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
|
| 1151 |
+
LOAD_FP32_VECTORIZED_INIT(Half, fp16);
|
| 1152 |
+
|
| 1153 |
+
#else // defined(CPU_CAPABILITY_AVX2)
|
| 1154 |
+
#define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
|
| 1155 |
+
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
|
| 1156 |
+
__at_align__ float values[Vectorized<float>::size()]; \
|
| 1157 |
+
for (const auto k : c10::irange(Vectorized<float>::size())) { \
|
| 1158 |
+
values[k] = data[k]; \
|
| 1159 |
+
} \
|
| 1160 |
+
out = Vectorized<float>::loadu(values); \
|
| 1161 |
+
} \
|
| 1162 |
+
\
|
| 1163 |
+
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
|
| 1164 |
+
load_fp32_from_##name(data, out1); \
|
| 1165 |
+
data += Vectorized<float>::size(); \
|
| 1166 |
+
load_fp32_from_##name(data, out2); \
|
| 1167 |
+
}
|
| 1168 |
+
LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
|
| 1169 |
+
LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
|
| 1170 |
+
|
| 1171 |
+
#endif
|
| 1172 |
+
}} // namsepace at::vec::CPU_CAPABILITY
|
| 1173 |
+
|
| 1174 |
+
#pragma GCC diagnostic pop
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h
ADDED
|
@@ -0,0 +1,432 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <c10/util/complex.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 10 |
+
|
| 11 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 12 |
+
#define SLEEF_STATIC_LIBS
|
| 13 |
+
#include <sleef.h>
|
| 14 |
+
#endif
|
| 15 |
+
|
| 16 |
+
namespace at::vec {
|
| 17 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 18 |
+
inline namespace CPU_CAPABILITY {
|
| 19 |
+
|
| 20 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 21 |
+
|
| 22 |
+
// AVX2 specialization of Vectorized for c10::complex<double>.
//
// Two complex<double> elements are packed into one 256-bit register as
// interleaved (real, imag) pairs: [re0, im0, re1, im1].  Comments of the
// form "xy -> xxyy" below describe widening a per-complex-element mask
// into a per-double-lane mask.
template <> class Vectorized<c10::complex<double>> {
private:
  __m256d values;  // packed layout: [re0, im0, re1, im1]
public:
  using value_type = c10::complex<double>;
  using size_type = int;
  // Number of complex<double> elements per register (4 doubles, 2 per complex).
  static constexpr size_type size() {
    return 2;
  }
  Vectorized() {}
  Vectorized(__m256d v) : values(v) {}
  // Broadcast a single complex value into both element slots.
  Vectorized(c10::complex<double> val) {
    double real_value = val.real();
    double imag_value = val.imag();
    values = _mm256_setr_pd(real_value, imag_value,
                            real_value, imag_value);
  }
  Vectorized(c10::complex<double> val1, c10::complex<double> val2) {
    values = _mm256_setr_pd(val1.real(), val1.imag(),
                            val2.real(), val2.imag());
  }
  operator __m256d() const {
    return values;
  }
  // Compile-time blend: bit i of `mask` selects element i from b, else from a.
  template <int64_t mask>
  static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
    static_assert (mask > -1 && mask < 4, "Unexpected mask value");
    switch (mask) {
      case 0:
        return a;
      case 1:
        return _mm256_blend_pd(a.values, b.values, 0x03);
      case 2:
        return _mm256_blend_pd(a.values, b.values, 0x0c);
      case 3: break;
    }
    return b;
  }
  // Runtime blend: elements whose mask lane is set come from b, others from a.
  static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
                                                 const Vectorized<c10::complex<double>>& mask) {
    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
    auto mask_ = _mm256_unpacklo_pd(mask.values, mask.values);
    return _mm256_blendv_pd(a.values, b.values, mask_);
  }
  // [base, base + step]; `step` may be a real or complex scalar type.
  template<typename step_t>
  static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0., step_t step = static_cast<step_t>(1)) {
    return Vectorized<c10::complex<double>>(base,
                                            base + step);
  }
  // First `count` elements taken from b, remainder from a.
  static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
                                              int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
    }
    return b;
  }
  // Load `count` complex values from (possibly unaligned) memory.
  // Lanes beyond `count` are zero-filled via an aligned scratch buffer.
  static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
    if (count == size())
      return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));

    __at_align__ double tmp_values[2*size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(2*size())) {
      tmp_values[i] = 0.0;
    }
    std::memcpy(
        tmp_values,
        reinterpret_cast<const double*>(ptr),
        count * sizeof(c10::complex<double>));
    return _mm256_load_pd(tmp_values);
  }
  // Store the first `count` complex values to (possibly unaligned) memory.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
    } else if (count > 0) {
      double tmp_values[2*size()];
      _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
    }
  }
  // Per-element access is intentionally disabled; round-trip through store()/loadu().
  const c10::complex<double>& operator[](int idx) const = delete;
  c10::complex<double>& operator[](int idx) = delete;
  // Apply a scalar function element-wise (scalar fallback through memory).
  Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
    __at_align__ c10::complex<double> tmp[size()];
    store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return loadu(tmp);
  }
  // Squared magnitude |z|^2 replicated into both lanes of each element.
  __m256d abs_2_() const {
    auto val_2 = _mm256_mul_pd(values, values);     // a*a     b*b
    return _mm256_hadd_pd(val_2, val_2);            // a*a+b*b a*a+b*b
  }
  // Magnitude |z| replicated into both lanes of each element.
  __m256d abs_() const {
    auto real = _mm256_movedup_pd(values);        // real real
    // movehdup_pd does not exist...
    auto imag = _mm256_permute_pd(values, 0xf);   // imag imag
    return Sleef_hypotd4_u05(real, imag);         // abs abs
  }
  // |z| as a complex number: (|z|, 0).
  Vectorized<c10::complex<double>> abs() const {
    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    return _mm256_and_pd(abs_(), real_mask);        // abs     0
  }
  __m256d angle_() const {
    //angle = atan2(b/a)
    auto b_a = _mm256_permute_pd(values, 0x05);     // b        a
    return Sleef_atan2d4_u10(values, b_a);          // 90-angle angle
  }
  // arg(z) as a complex number: (atan2(b, a), 0).
  Vectorized<c10::complex<double>> angle() const {
    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    auto angle = _mm256_permute_pd(angle_(), 0x05); // angle    90-angle
    return _mm256_and_pd(angle, real_mask);         // angle    0
  }
  // z / |z|, with 0 where |z| == 0 (the blend masks out the NaNs from 0/0).
  Vectorized<c10::complex<double>> sgn() const {
    auto abs = abs_();
    auto zero = _mm256_setzero_pd();
    auto mask = _mm256_cmp_pd(abs, zero, _CMP_EQ_OQ);
    auto div = _mm256_div_pd(values, abs);
    return _mm256_blendv_pd(div, zero, mask);
  }
  // Real parts kept in place, imaginary lanes zeroed: (a, 0).
  __m256d real_() const {
    const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                     0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    return _mm256_and_pd(values, real_mask);
  }
  Vectorized<c10::complex<double>> real() const {
    return real_();
  }
  // Imaginary parts kept in place, real lanes zeroed: (0, b).
  __m256d imag_() const {
    const __m256d imag_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
                                                                     0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
    return _mm256_and_pd(values, imag_mask);
  }
  // Imaginary part moved into the real slot: (b, 0).
  Vectorized<c10::complex<double>> imag() const {
    return _mm256_permute_pd(imag_(), 0x05);        //b        a
  }
  // Complex conjugate via a sign flip of the imaginary lanes.
  __m256d conj_() const {
    const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
    return _mm256_xor_pd(values, sign_mask);        // a       -b
  }
  Vectorized<c10::complex<double>> conj() const {
    return conj_();
  }
  Vectorized<c10::complex<double>> log() const {
    // Most trigonomic ops use the log() op to improve complex number performance.
    return map(std::log);
  }
  Vectorized<c10::complex<double>> log2() const {
    const __m256d log2_ = _mm256_set1_pd(std::log(2));
    return _mm256_div_pd(log(), log2_);
  }
  Vectorized<c10::complex<double>> log10() const {
    const __m256d log10_ = _mm256_set1_pd(std::log(10));
    return _mm256_div_pd(log(), log10_);
  }
  Vectorized<c10::complex<double>> log1p() const {
    return map(std::log1p);
  }
  Vectorized<c10::complex<double>> asin() const {
    // asin(x)
    // = -i*ln(iz + sqrt(1 -z^2))
    // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
    // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
    const __m256d one = _mm256_set1_pd(1);

    auto conj = conj_();
    auto b_a = _mm256_permute_pd(conj, 0x05);                         //-b        a
    auto ab = _mm256_mul_pd(conj, b_a);                               //-ab       -ab
    auto im = _mm256_add_pd(ab, ab);                                  //-2ab      -2ab

    auto val_2 = _mm256_mul_pd(values, values);                       // a*a      b*b
    auto re = _mm256_hsub_pd(val_2, _mm256_permute_pd(val_2, 0x05));  // a*a-b*b  b*b-a*a
    re = _mm256_sub_pd(one, re);

    auto root = Vectorized(_mm256_blend_pd(re, im, 0x0A)).sqrt();     //sqrt(re + i*im)
    auto ln = Vectorized(_mm256_add_pd(b_a, root)).log();             //ln(iz + sqrt())
    return Vectorized(_mm256_permute_pd(ln.values, 0x05)).conj();     //-i*ln()
  }
  Vectorized<c10::complex<double>> acos() const {
    // acos(x) = pi/2 - asin(x)
    constexpr auto pi_2d = c10::pi<double> / 2;
    const __m256d pi_2 = _mm256_setr_pd(pi_2d, 0.0, pi_2d, 0.0);
    return _mm256_sub_pd(pi_2, asin());
  }
  // Defined out of line, after operator/ is available.
  Vectorized<c10::complex<double>> atan() const;
  Vectorized<c10::complex<double>> atanh() const {
    return map(std::atanh);
  }
  Vectorized<c10::complex<double>> exp() const {
    //exp(a + bi)
    // = exp(a)*(cos(b) + sin(b)i)
    auto exp = Sleef_expd4_u10(values);                              //exp(a)           exp(b)
    exp = _mm256_blend_pd(exp, _mm256_permute_pd(exp, 0x05), 0x0A);  //exp(a)           exp(a)

    auto sin_cos = Sleef_sincosd4_u10(values);                       //[sin(a), cos(a)] [sin(b), cos(b)]
    auto cos_sin = _mm256_blend_pd(_mm256_permute_pd(sin_cos.y, 0x05),
                                   sin_cos.x, 0x0A);                 //cos(b)           sin(b)
    return _mm256_mul_pd(exp, cos_sin);
  }
  Vectorized<c10::complex<double>> exp2() const {
    // Use identity 2**x = exp(log(2) * x)
    const __m256d ln_2 = _mm256_set1_pd(c10::ln_2<double>);
    Vectorized<c10::complex<double>> scaled_values = _mm256_mul_pd(values, ln_2);
    return scaled_values.exp();
  }
  Vectorized<c10::complex<double>> expm1() const {
    return map(std::expm1);
  }
  Vectorized<c10::complex<double>> sin() const {
    return map(std::sin);
  }
  Vectorized<c10::complex<double>> sinh() const {
    return map(std::sinh);
  }
  Vectorized<c10::complex<double>> cos() const {
    return map(std::cos);
  }
  Vectorized<c10::complex<double>> cosh() const {
    return map(std::cosh);
  }
  // ceil/floor/round/trunc apply to real and imaginary lanes independently.
  Vectorized<c10::complex<double>> ceil() const {
    return _mm256_ceil_pd(values);
  }
  Vectorized<c10::complex<double>> floor() const {
    return _mm256_floor_pd(values);
  }
  Vectorized<c10::complex<double>> neg() const {
    auto zero = _mm256_setzero_pd();
    return _mm256_sub_pd(zero, values);
  }
  Vectorized<c10::complex<double>> round() const {
    return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  }
  Vectorized<c10::complex<double>> tan() const {
    return map(std::tan);
  }
  Vectorized<c10::complex<double>> tanh() const {
    return map(std::tanh);
  }
  Vectorized<c10::complex<double>> trunc() const {
    return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
  }
  Vectorized<c10::complex<double>> sqrt() const {
    return map(std::sqrt);
  }
  // Defined out of line below (needs the xor/div sequence and abs_2_()).
  Vectorized<c10::complex<double>> reciprocal() const;
  Vectorized<c10::complex<double>> rsqrt() const {
    return sqrt().reciprocal();
  }
  // Element-wise std::pow via scalar fallback through memory.
  Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
    __at_align__ c10::complex<double> x_tmp[size()];
    __at_align__ c10::complex<double> y_tmp[size()];
    store(x_tmp);
    exp.store(y_tmp);
    for (const auto i : c10::irange(size())) {
      x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
    }
    return loadu(x_tmp);
  }
  // Comparison using the _CMP_**_OQ predicate.
  //   `O`: get false if an operand is NaN
  //   `Q`: do not raise if an operand is NaN
  // These return per-lane bit masks (all-ones/all-zeros), not 0.0/1.0;
  // see eq()/ne() below for the normalized 0/1 versions.
  Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
  }
  Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
  }
  // Ordering comparisons are undefined for complex numbers.
  Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>&) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>&) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>&) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }
  Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>&) const {
    TORCH_CHECK(false, "not supported for complex numbers");
  }

  Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
  Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
};
|
| 316 |
+
|
| 317 |
+
// Element-wise complex addition: real and imaginary lanes add independently.
template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
  const __m256d lane_sum = _mm256_add_pd(a, b);
  return lane_sum;
}
|
| 320 |
+
|
| 321 |
+
// Element-wise complex subtraction: real and imaginary lanes subtract independently.
template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
  const __m256d lane_diff = _mm256_sub_pd(a, b);
  return lane_diff;
}
|
| 324 |
+
|
| 325 |
+
// Element-wise complex multiply: (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
  const __m256d neg_imag_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
  // Direct lane products: [ac, bd] per element.
  const __m256d prod_direct = _mm256_mul_pd(a, b);

  // Swap b's lanes to [d, c], then flip the sign of the second lane: [d, -c].
  __m256d b_swapped = _mm256_permute_pd(b, 0x05);
  b_swapped = _mm256_xor_pd(neg_imag_mask, b_swapped);
  // Cross lane products: [ad, -bc] per element.
  const __m256d prod_cross = _mm256_mul_pd(a, b_swapped);

  // Horizontal subtract of each pair yields [ac - bd, ad + bc].
  return _mm256_hsub_pd(prod_direct, prod_cross);
}
|
| 337 |
+
|
| 338 |
+
// Element-wise complex division: re + im*i = (a + bi) / (c + di).
//
// Both operands are pre-scaled by 1/max(|c|, |d|) (Smith-style scaling) so
// the intermediate products (ac+bd, bc-ad, c^2+d^2) cannot spuriously
// overflow/underflow; the common scale factor cancels in the final divide.
//
// Fix: the original used float literals (-0.f, 1.0f) in this
// double-precision path; they convert to exactly -0.0 and 1.0 so behavior
// is unchanged, but the literals now match the `pd` intrinsics' element type.
template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
  //re + im*i = (a + bi)  / (c + di)
  auto mask = _mm256_set1_pd(-0.0);
  auto fabs_cd = _mm256_andnot_pd(mask, b);        // |c|    |d|
  auto fabs_dc = _mm256_permute_pd(fabs_cd, 0x05); // |d|    |c|
  auto scale = _mm256_div_pd(_mm256_set1_pd(1.0), _mm256_max_pd(fabs_cd, fabs_dc));  // 1/sc     1/sc
  auto a2 = _mm256_mul_pd(a, scale);               // a/sc     b/sc
  auto b2 = _mm256_mul_pd(b, scale);               // c/sc     d/sc
  auto acbd2 = _mm256_mul_pd(a2, b2);              // ac/sc^2  bd/sc^2

  const __m256d sign_mask = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0);
  auto dc2 = _mm256_permute_pd(b2, 0x05);          // d/sc         c/sc
  dc2 = _mm256_xor_pd(sign_mask, dc2);             // -d/sc        c/sc
  auto adbc2 = _mm256_mul_pd(a2, dc2);             // -ad/sc^2     bc/sc^2
  auto res2 = _mm256_hadd_pd(acbd2, adbc2);        // (ac+bd)/sc^2 (bc-ad)/sc^2

  // Denominator |c + di|^2 carries the same sc^2 factor, so it cancels here.
  auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_();  // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
  res2 = _mm256_div_pd(res2, denom2);
  return res2;
}
|
| 359 |
+
|
| 360 |
+
// reciprocal. Implement this here so we can use multiplication.
|
| 361 |
+
// Element-wise reciprocal: 1/(c + di) = (c - di) / (c^2 + d^2).
// Defined out of line so it can be placed after operator/ in the header.
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const {
  // Flip the sign of the imaginary lanes to form conj(z) = (c, -d).
  const __m256d flip_imag = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
  const __m256d numerator = _mm256_xor_pd(flip_imag, values);  // c   -d
  // Divide by |z|^2, which abs_2_() replicates into both lanes.
  return _mm256_div_pd(numerator, abs_2_());
}
|
| 369 |
+
|
| 370 |
+
// atan(z) = i/2 * ln((i + z)/(i - z))
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
  const __m256d unit_i = _mm256_setr_pd(0.0, 1.0, 0.0, 1.0);     // i in each element
  const Vectorized half_i = _mm256_setr_pd(0.0, 0.5, 0.0, 0.5);  // i/2 in each element

  auto i_plus_z = Vectorized(_mm256_add_pd(unit_i, values));     // a    1+b
  auto i_minus_z = Vectorized(_mm256_sub_pd(unit_i, values));    // -a   1-b
  auto log_ratio = (i_plus_z / i_minus_z).log();                 // ln((i + z)/(i - z))
  return half_i * log_ratio;                                     // i/2 * ln(...)
}
|
| 380 |
+
|
| 381 |
+
template <>
|
| 382 |
+
Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
| 383 |
+
auto abs_a = a.abs_2_();
|
| 384 |
+
auto abs_b = b.abs_2_();
|
| 385 |
+
auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_LT_OQ);
|
| 386 |
+
auto max = _mm256_blendv_pd(a, b, mask);
|
| 387 |
+
// Exploit the fact that all-ones is a NaN.
|
| 388 |
+
auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
|
| 389 |
+
return _mm256_or_pd(max, isnan);
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
template <>
|
| 393 |
+
Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
| 394 |
+
auto abs_a = a.abs_2_();
|
| 395 |
+
auto abs_b = b.abs_2_();
|
| 396 |
+
auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_GT_OQ);
|
| 397 |
+
auto min = _mm256_blendv_pd(a, b, mask);
|
| 398 |
+
// Exploit the fact that all-ones is a NaN.
|
| 399 |
+
auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
|
| 400 |
+
return _mm256_or_pd(min, isnan);
|
| 401 |
+
}
|
| 402 |
+
|
| 403 |
+
template <>
|
| 404 |
+
Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
| 405 |
+
return _mm256_and_pd(a, b);
|
| 406 |
+
}
|
| 407 |
+
|
| 408 |
+
template <>
|
| 409 |
+
Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
| 410 |
+
return _mm256_or_pd(a, b);
|
| 411 |
+
}
|
| 412 |
+
|
| 413 |
+
template <>
|
| 414 |
+
Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
|
| 415 |
+
return _mm256_xor_pd(a, b);
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
// Complex equality as a 0.0/1.0 flag: a complex element is equal only when
// both its real and imaginary components compare equal.
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
  // Per-lane (real/imag) comparison masks.
  const auto component_eq = (*this == other);
  // AND the two component masks, then normalize the all-ones mask to 1.0
  // by AND-ing with the bit pattern of 1.0.
  const auto both_equal = component_eq.real() & component_eq.imag();
  return both_equal & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
}
|
| 423 |
+
|
| 424 |
+
// Complex inequality as a 0.0/1.0 flag: a complex element is unequal when
// either its real or its imaginary component differs.
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
  // Per-lane (real/imag) comparison masks.
  const auto component_ne = (*this != other);
  // OR the two component masks, then normalize the all-ones mask to 1.0
  // by AND-ing with the bit pattern of 1.0.
  const auto any_unequal = component_ne.real() | component_ne.imag();
  return any_unequal & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
}
|
| 429 |
+
|
| 430 |
+
#endif
|
| 431 |
+
|
| 432 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <c10/util/complex.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 11 |
+
#define SLEEF_STATIC_LIBS
|
| 12 |
+
#include <sleef.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
namespace at::vec {
|
| 16 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 17 |
+
inline namespace CPU_CAPABILITY {
|
| 18 |
+
|
| 19 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 20 |
+
|
| 21 |
+
template <> class Vectorized<c10::complex<float>> {
|
| 22 |
+
private:
|
| 23 |
+
__m256 values;
|
| 24 |
+
public:
|
| 25 |
+
using value_type = c10::complex<float>;
|
| 26 |
+
using size_type = int;
|
| 27 |
+
static constexpr size_type size() {
|
| 28 |
+
return 4;
|
| 29 |
+
}
|
| 30 |
+
Vectorized() {}
|
| 31 |
+
Vectorized(__m256 v) : values(v) {}
|
| 32 |
+
Vectorized(c10::complex<float> val) {
|
| 33 |
+
float real_value = val.real();
|
| 34 |
+
float imag_value = val.imag();
|
| 35 |
+
values = _mm256_setr_ps(real_value, imag_value,
|
| 36 |
+
real_value, imag_value,
|
| 37 |
+
real_value, imag_value,
|
| 38 |
+
real_value, imag_value
|
| 39 |
+
);
|
| 40 |
+
}
|
| 41 |
+
Vectorized(c10::complex<float> val1, c10::complex<float> val2, c10::complex<float> val3, c10::complex<float> val4) {
|
| 42 |
+
values = _mm256_setr_ps(val1.real(), val1.imag(),
|
| 43 |
+
val2.real(), val2.imag(),
|
| 44 |
+
val3.real(), val3.imag(),
|
| 45 |
+
val4.real(), val4.imag()
|
| 46 |
+
);
|
| 47 |
+
}
|
| 48 |
+
operator __m256() const {
|
| 49 |
+
return values;
|
| 50 |
+
}
|
| 51 |
+
template <int64_t mask>
|
| 52 |
+
static Vectorized<c10::complex<float>> blend(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
|
| 53 |
+
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
|
| 54 |
+
static_assert(mask > -1 && mask < 16, "Unexpected mask range");
|
| 55 |
+
switch (mask) {
|
| 56 |
+
case 0:
|
| 57 |
+
return a;
|
| 58 |
+
case 1:
|
| 59 |
+
return _mm256_blend_ps(a.values, b.values, 0x03); //b0000 0001 = b0000 0011
|
| 60 |
+
case 2:
|
| 61 |
+
return _mm256_blend_ps(a.values, b.values, 0x0C); //b0000 0010 = b0000 1100
|
| 62 |
+
case 3:
|
| 63 |
+
return _mm256_blend_ps(a.values, b.values, 0x0F); //b0000 0011 = b0000 1111
|
| 64 |
+
case 4:
|
| 65 |
+
return _mm256_blend_ps(a.values, b.values, 0x30); //b0000 0100 = b0011 0000
|
| 66 |
+
case 5:
|
| 67 |
+
return _mm256_blend_ps(a.values, b.values, 0x33); //b0000 0101 = b0011 0011
|
| 68 |
+
case 6:
|
| 69 |
+
return _mm256_blend_ps(a.values, b.values, 0x3C); //b0000 0110 = b0011 1100
|
| 70 |
+
case 7:
|
| 71 |
+
return _mm256_blend_ps(a.values, b.values, 0x3F); //b0000 0111 = b0011 1111
|
| 72 |
+
case 8:
|
| 73 |
+
return _mm256_blend_ps(a.values, b.values, 0xC0); //b0000 1000 = b1100 0000
|
| 74 |
+
case 9:
|
| 75 |
+
return _mm256_blend_ps(a.values, b.values, 0xC3); //b0000 1001 = b1100 0011
|
| 76 |
+
case 10:
|
| 77 |
+
return _mm256_blend_ps(a.values, b.values, 0xCC); //b0000 1010 = b1100 1100
|
| 78 |
+
case 11:
|
| 79 |
+
return _mm256_blend_ps(a.values, b.values, 0xCF); //b0000 1011 = b1100 1111
|
| 80 |
+
case 12:
|
| 81 |
+
return _mm256_blend_ps(a.values, b.values, 0xF0); //b0000 1100 = b1111 0000
|
| 82 |
+
case 13:
|
| 83 |
+
return _mm256_blend_ps(a.values, b.values, 0xF3); //b0000 1101 = b1111 0011
|
| 84 |
+
case 14:
|
| 85 |
+
return _mm256_blend_ps(a.values, b.values, 0xFC); //b0000 1110 = b1111 1100
|
| 86 |
+
default: break;
|
| 87 |
+
}
|
| 88 |
+
return b;
|
| 89 |
+
}
|
| 90 |
+
static Vectorized<c10::complex<float>> blendv(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
|
| 91 |
+
const Vectorized<c10::complex<float>>& mask) {
|
| 92 |
+
// convert c10::complex<V> index mask to V index mask: xy -> xxyy
|
| 93 |
+
auto mask_ = _mm256_unpacklo_ps(mask.values, mask.values);
|
| 94 |
+
return _mm256_blendv_ps(a.values, b.values, mask_);
|
| 95 |
+
|
| 96 |
+
}
|
| 97 |
+
template<typename step_t>
|
| 98 |
+
static Vectorized<c10::complex<float>> arange(c10::complex<float> base = 0., step_t step = static_cast<step_t>(1)) {
|
| 99 |
+
return Vectorized<c10::complex<float>>(base,
|
| 100 |
+
base + step,
|
| 101 |
+
base + c10::complex<float>(2)*step,
|
| 102 |
+
base + c10::complex<float>(3)*step);
|
| 103 |
+
}
|
| 104 |
+
static Vectorized<c10::complex<float>> set(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
|
| 105 |
+
int64_t count = size()) {
|
| 106 |
+
switch (count) {
|
| 107 |
+
case 0:
|
| 108 |
+
return a;
|
| 109 |
+
case 1:
|
| 110 |
+
return blend<1>(a, b);
|
| 111 |
+
case 2:
|
| 112 |
+
return blend<3>(a, b);
|
| 113 |
+
case 3:
|
| 114 |
+
return blend<7>(a, b);
|
| 115 |
+
}
|
| 116 |
+
return b;
|
| 117 |
+
}
|
| 118 |
+
static Vectorized<c10::complex<float>> loadu(const void* ptr, int64_t count = size()) {
|
| 119 |
+
if (count == size())
|
| 120 |
+
return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
|
| 121 |
+
|
| 122 |
+
__at_align__ float tmp_values[2*size()];
|
| 123 |
+
// Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
|
| 124 |
+
// for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
|
| 125 |
+
// instructions while a loop would be compiled to one instruction.
|
| 126 |
+
for (const auto i : c10::irange(2*size())) {
|
| 127 |
+
tmp_values[i] = 0.0;
|
| 128 |
+
}
|
| 129 |
+
std::memcpy(
|
| 130 |
+
tmp_values,
|
| 131 |
+
reinterpret_cast<const float*>(ptr),
|
| 132 |
+
count * sizeof(c10::complex<float>));
|
| 133 |
+
return _mm256_load_ps(tmp_values);
|
| 134 |
+
}
|
| 135 |
+
void store(void* ptr, int count = size()) const {
|
| 136 |
+
if (count == size()) {
|
| 137 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
|
| 138 |
+
} else if (count > 0) {
|
| 139 |
+
float tmp_values[2*size()];
|
| 140 |
+
_mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
|
| 141 |
+
std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<float>));
|
| 142 |
+
}
|
| 143 |
+
}
|
| 144 |
+
const c10::complex<float>& operator[](int idx) const = delete;
|
| 145 |
+
c10::complex<float>& operator[](int idx) = delete;
|
| 146 |
+
Vectorized<c10::complex<float>> map(c10::complex<float> (*const f)(const c10::complex<float> &)) const {
|
| 147 |
+
__at_align__ c10::complex<float> tmp[size()];
|
| 148 |
+
store(tmp);
|
| 149 |
+
for (const auto i : c10::irange(size())) {
|
| 150 |
+
tmp[i] = f(tmp[i]);
|
| 151 |
+
}
|
| 152 |
+
return loadu(tmp);
|
| 153 |
+
}
|
| 154 |
+
__m256 abs_2_() const {
|
| 155 |
+
auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
|
| 156 |
+
auto ret = _mm256_hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
|
| 157 |
+
return _mm256_permute_ps(ret, 0xD8);
|
| 158 |
+
}
|
| 159 |
+
__m256 abs_() const {
|
| 160 |
+
auto real = _mm256_moveldup_ps(values); // real real
|
| 161 |
+
auto imag = _mm256_movehdup_ps(values); // imag imag
|
| 162 |
+
return Sleef_hypotf8_u05(real, imag); // abs abs
|
| 163 |
+
}
|
| 164 |
+
Vectorized<c10::complex<float>> abs() const {
|
| 165 |
+
const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
|
| 166 |
+
0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
|
| 167 |
+
return _mm256_and_ps(abs_(), real_mask); // abs 0
|
| 168 |
+
}
|
| 169 |
+
// Raw atan2 over swapped operand pairs; the lane computed as
// atan2(imag, real) is the true argument of z — angle() extracts it.
__m256 angle_() const {
  //angle = atan2(b/a)
  auto b_a = _mm256_permute_ps(values, 0xB1);     // b a
  return Sleef_atan2f8_u10(values, b_a);          // 90-angle angle
}
// Complex arg(): atan2(imag, real) placed in the real slot, 0 in the
// imaginary slot.
Vectorized<c10::complex<float>> angle() const {
  const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
                                                                 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
  auto angle = _mm256_permute_ps(angle_(), 0xB1); // angle 90-angle
  return _mm256_and_ps(angle, real_mask);         // angle 0
}
|
| 180 |
+
// sgn(z) = z / |z|, and 0 where z == 0. The division is computed
// unconditionally (yielding NaN/inf lanes where |z| == 0); those lanes are
// then replaced by zero via the blend, so no invalid values escape.
Vectorized<c10::complex<float>> sgn() const {
  auto abs = abs_();
  auto zero = _mm256_setzero_ps();
  auto mask = _mm256_cmp_ps(abs, zero, _CMP_EQ_OQ);
  auto div = _mm256_div_ps(values, abs);
  return _mm256_blendv_ps(div, zero, mask);
}
|
| 187 |
+
// Zeroes the imaginary slot of every pair, keeping the real part in place.
__m256 real_() const {
  const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
                                                                 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
  return _mm256_and_ps(values, real_mask);
}
// real(z) as a complex vector: (a, 0).
Vectorized<c10::complex<float>> real() const {
  return real_();
}
// Zeroes the real slot of every pair, keeping the imaginary part in place
// (still in the imaginary position — see imag() for the shift).
__m256 imag_() const {
  const __m256 imag_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
                                                                 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
  return _mm256_and_ps(values, imag_mask);
}
// imag(z) as a complex vector: the permute moves b into the real slot,
// giving (b, 0).
Vectorized<c10::complex<float>> imag() const {
  return _mm256_permute_ps(imag_(), 0xB1); //b a
}
// Conjugate: flip the sign bit of the imaginary slot only (xor with -0.0).
__m256 conj_() const {
  const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
  return _mm256_xor_ps(values, sign_mask); // a -b
}
// conj(z) = (a, -b).
Vectorized<c10::complex<float>> conj() const {
  return conj_();
}
|
| 210 |
+
// Complex natural log via the scalar fallback.
Vectorized<c10::complex<float>> log() const {
  // Most trigonomic ops use the log() op to improve complex number performance.
  return map(std::log);
}
// log2(z) = log(z) / log(2) — complex division by the real constant ln(2)
// reduces to an element-wise divide, so plain _mm256_div_ps is correct here.
Vectorized<c10::complex<float>> log2() const {
  const __m256 log2_ = _mm256_set1_ps(std::log(2));
  return _mm256_div_ps(log(), log2_);
}
// log10(z) = log(z) / log(10); same change-of-base trick as log2().
Vectorized<c10::complex<float>> log10() const {
  const __m256 log10_ = _mm256_set1_ps(std::log(10));
  return _mm256_div_ps(log(), log10_);
}
// Complex log1p via the scalar fallback.
Vectorized<c10::complex<float>> log1p() const {
  return map(std::log1p);
}
|
| 225 |
+
// Vectorized complex arcsine via the logarithmic identity below. The real
// and imaginary parts of 1 - z^2 are built with lane shuffles, combined into
// one complex vector with a blend, then fed through sqrt() and log().
Vectorized<c10::complex<float>> asin() const {
  // asin(x)
  // = -i*ln(iz + sqrt(1 -z^2))
  // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
  // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
  const __m256 one = _mm256_set1_ps(1);

  auto conj = conj_();
  auto b_a = _mm256_permute_ps(conj, 0xB1);                        //-b      a
  auto ab = _mm256_mul_ps(conj, b_a);                              //-ab     -ab
  auto im = _mm256_add_ps(ab, ab);                                 //-2ab    -2ab

  auto val_2 = _mm256_mul_ps(values, values);                      // a*a    b*b
  auto re = _mm256_hsub_ps(val_2, _mm256_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a
  re = _mm256_permute_ps(re, 0xD8);                                // undo hsub's cross-lane interleave
  re = _mm256_sub_ps(one, re);                                     // 1 - (a*a - b*b)

  auto root = Vectorized(_mm256_blend_ps(re, im, 0xAA)).sqrt();    //sqrt(re + i*im)
  auto ln = Vectorized(_mm256_add_ps(b_a, root)).log();            //ln(iz + sqrt())
  // Multiplying by -i == swap real/imag then conjugate.
  return Vectorized(_mm256_permute_ps(ln.values, 0xB1)).conj();    //-i*ln()
}
// acos falls back to the scalar implementation per lane.
Vectorized<c10::complex<float>> acos() const {
  return map(std::acos);
}
// Defined out-of-line below: needs operator/ which is declared after the class.
Vectorized<c10::complex<float>> atan() const;
// atanh falls back to the scalar implementation per lane.
Vectorized<c10::complex<float>> atanh() const {
  return map(std::atanh);
}
|
| 253 |
+
// Vectorized complex exponential: exp(a+bi) = exp(a) * (cos(b) + i*sin(b)).
// exp(a) is broadcast into both slots of each pair, and Sleef's sincos
// produces sin/cos of every lane in one call; shuffles place cos(b), sin(b)
// into the (real, imag) slots before the final multiply.
Vectorized<c10::complex<float>> exp() const {
  //exp(a + bi)
  // = exp(a)*(cos(b) + sin(b)i)
  auto exp = Sleef_expf8_u10(values);                              //exp(a) exp(b)
  exp = _mm256_blend_ps(exp, _mm256_permute_ps(exp, 0xB1), 0xAA);  //exp(a) exp(a)

  auto sin_cos = Sleef_sincosf8_u10(values);                       //[sin(a), cos(a)] [sin(b), cos(b)]
  auto cos_sin = _mm256_blend_ps(_mm256_permute_ps(sin_cos.y, 0xB1),
                                 sin_cos.x, 0xAA);                 //cos(b) sin(b)
  return _mm256_mul_ps(exp, cos_sin);
}
// 2**z via exp: scaling z by ln(2) is a complex-by-real-scalar multiply,
// which is element-wise, so a plain _mm256_mul_ps is correct.
Vectorized<c10::complex<float>> exp2() const {
  // Use identity 2**x = exp(log(2) * x)
  const __m256 ln_2 = _mm256_set1_ps(c10::ln_2<float>);
  Vectorized<c10::complex<float>> scaled_values = _mm256_mul_ps(values, ln_2);
  return scaled_values.exp();
}
// expm1 falls back to the scalar implementation per lane.
Vectorized<c10::complex<float>> expm1() const {
  return map(std::expm1);
}
|
| 273 |
+
// Trig/hyperbolic ops fall back to the scalar std:: implementations per lane.
Vectorized<c10::complex<float>> sin() const {
  return map(std::sin);
}
Vectorized<c10::complex<float>> sinh() const {
  return map(std::sinh);
}
Vectorized<c10::complex<float>> cos() const {
  return map(std::cos);
}
Vectorized<c10::complex<float>> cosh() const {
  return map(std::cosh);
}
// ceil/floor/round/trunc act element-wise on the packed floats, i.e. they
// round real and imaginary parts independently.
Vectorized<c10::complex<float>> ceil() const {
  return _mm256_ceil_ps(values);
}
Vectorized<c10::complex<float>> floor() const {
  return _mm256_floor_ps(values);
}
// Negation of both components: 0 - z.
Vectorized<c10::complex<float>> neg() const {
  auto zero = _mm256_setzero_ps();
  return _mm256_sub_ps(zero, values);
}
Vectorized<c10::complex<float>> round() const {
  return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
}
Vectorized<c10::complex<float>> tan() const {
  return map(std::tan);
}
Vectorized<c10::complex<float>> tanh() const {
  return map(std::tanh);
}
Vectorized<c10::complex<float>> trunc() const {
  return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
}
// Complex sqrt falls back to the scalar implementation per lane.
Vectorized<c10::complex<float>> sqrt() const {
  return map(std::sqrt);
}
// Defined out-of-line below so it can reuse abs_2_().
Vectorized<c10::complex<float>> reciprocal() const;
// 1/sqrt(z), composed from the two ops above.
Vectorized<c10::complex<float>> rsqrt() const {
  return sqrt().reciprocal();
}
// Element-wise complex power via the scalar fallback (no SIMD pow for
// complex; both operands are spilled to stack buffers).
Vectorized<c10::complex<float>> pow(const Vectorized<c10::complex<float>> &exp) const {
  __at_align__ c10::complex<float> x_tmp[size()];
  __at_align__ c10::complex<float> y_tmp[size()];
  store(x_tmp);
  exp.store(y_tmp);
  for (const auto i : c10::irange(size())) {
    x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
  }
  return loadu(x_tmp);
}
|
| 324 |
+
// Comparison using the _CMP_**_OQ predicate.
// `O`: get false if an operand is NaN
// `Q`: do not raise if an operand is NaN
// Note: == / != compare real and imaginary slots independently; callers that
// need whole-complex equality use eq()/ne() below, which combine the slots.
Vectorized<c10::complex<float>> operator==(const Vectorized<c10::complex<float>>& other) const {
  return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
}
// NEQ uses the unordered predicate so NaN lanes compare as "not equal".
Vectorized<c10::complex<float>> operator!=(const Vectorized<c10::complex<float>>& other) const {
  return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
}
// Ordering is undefined for complex numbers; these always throw.
Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& /*other*/) const {
  TORCH_CHECK(false, "not supported for complex numbers");
}
Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& /*other*/) const {
  TORCH_CHECK(false, "not supported for complex numbers");
}
Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& /*other*/) const {
  TORCH_CHECK(false, "not supported for complex numbers");
}
Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& /*other*/) const {
  TORCH_CHECK(false, "not supported for complex numbers");
}

// Whole-complex equality tests returning 0.0/1.0 per lane; defined
// out-of-line below (they need real()/imag() and operator&).
Vectorized<c10::complex<float>> eq(const Vectorized<c10::complex<float>>& other) const;
Vectorized<c10::complex<float>> ne(const Vectorized<c10::complex<float>>& other) const;
|
| 348 |
+
};
|
| 349 |
+
|
| 350 |
+
// Complex addition/subtraction are element-wise on the packed floats.
template <> Vectorized<c10::complex<float>> inline operator+(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
  return _mm256_add_ps(a, b);
}

template <> Vectorized<c10::complex<float>> inline operator-(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
  return _mm256_sub_ps(a, b);
}

// Complex multiply: both halves of the formula are computed as lane products,
// then combined with a horizontal subtract; the final permute undoes hsub's
// cross-pair interleave.
template <> Vectorized<c10::complex<float>> inline operator*(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
  //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
  const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
  auto ac_bd = _mm256_mul_ps(a, b);          //ac       bd

  auto d_c = _mm256_permute_ps(b, 0xB1);     //d        c
  d_c = _mm256_xor_ps(sign_mask, d_c);       //d        -c
  auto ad_bc = _mm256_mul_ps(a, d_c);        //ad       -bc

  auto ret = _mm256_hsub_ps(ac_bd, ad_bc);   //ac - bd  ad + bc
  ret = _mm256_permute_ps(ret, 0xD8);
  return ret;
}

// Complex divide with overflow guarding: both operands are pre-scaled by
// 1/max(|c|,|d|) so intermediate products stay in range. The scale cancels
// exactly in the final numerator/denominator division, so the approximate
// _mm256_rcp_ps is sufficient here.
template <> Vectorized<c10::complex<float>> inline operator/(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
  //re + im*i = (a + bi) / (c + di)
  auto mask = _mm256_set1_ps(-0.f);
  auto fabs_cd = _mm256_andnot_ps(mask, b);                    // |c|    |d|
  auto fabs_dc = _mm256_permute_ps(fabs_cd, 0xB1);             // |d|    |c|
  auto scale = _mm256_rcp_ps(_mm256_max_ps(fabs_cd, fabs_dc)); // 1/sc   1/sc
  auto a2 = _mm256_mul_ps(a, scale);                           // a/sc   b/sc
  auto b2 = _mm256_mul_ps(b, scale);                           // c/sc   d/sc
  auto acbd2 = _mm256_mul_ps(a2, b2);

  const __m256 sign_mask = _mm256_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
  auto dc2 = _mm256_permute_ps(b2, 0xB1);    // d/sc         c/sc
  dc2 = _mm256_xor_ps(sign_mask, dc2);       // -d/sc        c/sc
  auto adbc2 = _mm256_mul_ps(a2, dc2);       //-ad/sc^2      bc/sc^2
  auto res2 = _mm256_hadd_ps(acbd2, adbc2);  //(ac+bd)/sc^2  (bc-ad)/sc^2
  res2 = _mm256_permute_ps(res2, 0xD8);

  // get the denominator
  auto denom2 = Vectorized<c10::complex<float>>(b2).abs_2_();  // (c^2+d^2)/sc^2   (c^2+d^2)/sc^2
  res2 = _mm256_div_ps(res2, denom2);
  return res2;
}
|
| 394 |
+
|
| 395 |
+
// reciprocal. Implement this here so we can use multiplication.
// 1/(c + di) = conj(c + di) / |c + di|^2: flip the sign of the imaginary
// slot, then divide both slots by the squared magnitude.
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::reciprocal() const {
  //re + im*i = (a + bi) / (c + di)
  //re = (ac + bd)/abs_2() = c/abs_2()
  //im = (bc - ad)/abs_2() = -d/abs_2()
  const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
  auto c_d = _mm256_xor_ps(sign_mask, values); //c -d
  return _mm256_div_ps(c_d, abs_2_());
}

// Complex arctangent via the logarithmic identity. `i` and `i_half` are the
// constants i and i/2 laid out as (real, imag) pairs; the multiply/divide go
// through the complex operator overloads defined above.
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::atan() const {
  // atan(x) = i/2 * ln((i + z)/(i - z))
  const __m256 i = _mm256_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
  const Vectorized i_half = _mm256_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);

  auto sum = Vectorized(_mm256_add_ps(i, values)); // a     1+b
  auto sub = Vectorized(_mm256_sub_ps(i, values)); // -a    1-b
  auto ln = (sum/sub).log();                       // ln((i + z)/(i - z))
  return i_half*ln;                                // i/2*ln()
}
|
| 415 |
+
|
| 416 |
+
// Lexicographic-by-magnitude maximum: picks, per complex pair, whichever of
// a/b has the larger |z|^2. If either magnitude comparison is unordered
// (NaN), the result lane is forced to NaN by OR-ing in the all-ones mask.
template <>
Vectorized<c10::complex<float>> inline maximum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
  auto abs_a = a.abs_2_();
  auto abs_b = b.abs_2_();
  auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
  auto max = _mm256_blendv_ps(a, b, mask);
  // Exploit the fact that all-ones is a NaN.
  auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
  return _mm256_or_ps(max, isnan);
}

// Mirror of maximum(): picks the element with the smaller |z|^2,
// propagating NaN the same way.
template <>
Vectorized<c10::complex<float>> inline minimum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
  auto abs_a = a.abs_2_();
  auto abs_b = b.abs_2_();
  auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
  auto min = _mm256_blendv_ps(a, b, mask);
  // Exploit the fact that all-ones is a NaN.
  auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
  return _mm256_or_ps(min, isnan);
}
|
| 437 |
+
|
| 438 |
+
// Bitwise ops operate on the raw float bit patterns; used for mask logic.
template <>
Vectorized<c10::complex<float>> inline operator&(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
  return _mm256_and_ps(a, b);
}

template <>
Vectorized<c10::complex<float>> inline operator|(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
  return _mm256_or_ps(a, b);
}

template <>
Vectorized<c10::complex<float>> inline operator^(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
  return _mm256_xor_ps(a, b);
}
|
| 452 |
+
|
| 453 |
+
// Whole-complex equality: a pair is equal only if BOTH slots compared equal.
// The final AND with 1.0f converts the all-ones mask lanes into numeric 1.0
// (0.0 where unequal), matching the eq()/ne() contract of other dtypes.
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::eq(
    const Vectorized<c10::complex<float>>& other) const {
  auto eq = (*this == other);  // compares real and imag individually
  // If both real numbers and imag numbers are equal, then the complex numbers are equal
  return (eq.real() & eq.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
}

// Whole-complex inequality: a pair differs if EITHER slot differs; result is
// numeric 1.0/0.0 per lane, as in eq().
inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::ne(
    const Vectorized<c10::complex<float>>& other) const {
  auto ne = (*this != other);  // compares real and imag individually
  // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
  return (ne.real() | ne.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
}
|
| 466 |
+
|
| 467 |
+
#endif
|
| 468 |
+
|
| 469 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h
ADDED
|
@@ -0,0 +1,443 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 10 |
+
#define SLEEF_STATIC_LIBS
|
| 11 |
+
#include <sleef.h>
|
| 12 |
+
#endif
|
| 13 |
+
|
| 14 |
+
namespace at::vec {
|
| 15 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 16 |
+
inline namespace CPU_CAPABILITY {
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 20 |
+
|
| 21 |
+
// AVX2 specialization of Vectorized for double: 4 packed doubles in a
// __m256d. Transcendental ops dispatch to the Sleef vector math library;
// ops without a SIMD form fall back to scalar code via map().
// Per Note [Do not compile initializers with AVX], no static data is
// defined in this header.
template <> class Vectorized<double> {
private:
  __m256d values;  // the 4 packed lanes
public:
  using value_type = double;
  using size_type = int;
  // Number of lanes in the vector.
  static constexpr size_type size() {
    return 4;
  }
  // Default ctor intentionally leaves lanes uninitialized (hot-path type).
  Vectorized() {}
  Vectorized(__m256d v) : values(v) {}
  // Broadcast a scalar into every lane.
  Vectorized(double val) {
    values = _mm256_set1_pd(val);
  }
  Vectorized(double val1, double val2, double val3, double val4) {
    values = _mm256_setr_pd(val1, val2, val3, val4);
  }
  operator __m256d() const {
    return values;
  }
  // Compile-time lane select: bit i of `mask` set => take lane i from b.
  template <int64_t mask>
  static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) {
    return _mm256_blend_pd(a.values, b.values, mask);
  }
  // Runtime lane select driven by the sign bit of each mask lane.
  static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b,
                                   const Vectorized<double>& mask) {
    return _mm256_blendv_pd(a.values, b.values, mask.values);
  }
  // {base, base+step, base+2*step, base+3*step}
  template<typename step_t>
  static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
    return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step);
  }
  // First `count` lanes come from b, the remaining lanes from a.
  static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b,
                                int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
    }
    return b;
  }
  // Unaligned load. A partial load (count < size()) goes through a zeroed
  // aligned stack buffer so no out-of-bounds memory is read.
  static Vectorized<double> loadu(const void* ptr, int64_t count = size()) {
    if (count == size())
      return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));

    __at_align__ double tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0.0;
    }
    std::memcpy(
        tmp_values,
        reinterpret_cast<const double*>(ptr),
        count * sizeof(double));
    return _mm256_load_pd(tmp_values);
  }
  // Unaligned store; a partial store spills to a stack buffer and memcpys
  // only the first `count` elements so no out-of-bounds bytes are written.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
    } else if (count > 0) {
      double tmp_values[size()];
      _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(double));
    }
  }
  // Lane access is deliberately disabled; go through store()/loadu().
  const double& operator[](int idx) const = delete;
  double& operator[](int idx) = delete;
  int zero_mask() const {
    // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
    __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ);
    return _mm256_movemask_pd(cmp);
  }
  // All-ones mask in lanes that are NaN (unordered self-comparison).
  Vectorized<double> isnan() const {
    return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q);
  }
  // True if any lane is +/-inf or NaN: x - x is NaN exactly for those
  // inputs, and the byte-level movemask (masked to skip sign-bit bytes)
  // detects the NaN payload bits.
  bool has_inf_nan() const {
    __m256d self_sub = _mm256_sub_pd(values, values);
    return (_mm256_movemask_epi8(_mm256_castpd_si256(self_sub)) & 0x77777777) != 0;
  }
  // Scalar fallback: spill, apply f per lane, reload.
  Vectorized<double> map(double (*const f)(double)) const {
    __at_align__ double tmp[size()];
    store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return loadu(tmp);
  }
  // |x| by clearing the sign bit.
  Vectorized<double> abs() const {
    auto mask = _mm256_set1_pd(-0.f);
    return _mm256_andnot_pd(mask, values);
  }
  // Real "angle": 0 for x >= 0, pi for x < 0, NaN propagated.
  Vectorized<double> angle() const {
    const auto zero_vec = _mm256_set1_pd(0.f);
    const auto nan_vec = _mm256_set1_pd(NAN);
    const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ);
    const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ);
    const auto pi = _mm256_set1_pd(c10::pi<double>);

    const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ);
    auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask);
    angle = _mm256_blendv_pd(angle, nan_vec, nan_mask);
    return angle;
  }
  // Complex-interface stubs for a real dtype.
  Vectorized<double> real() const {
    return *this;
  }
  Vectorized<double> imag() const {
    return _mm256_set1_pd(0);
  }
  Vectorized<double> conj() const {
    return *this;
  }
  // Sleef-backed transcendental functions (u10/u05/u15 = ULP error bound).
  Vectorized<double> acos() const {
    return Vectorized<double>(Sleef_acosd4_u10(values));
  }
  Vectorized<double> acosh() const {
    return Vectorized<double>(Sleef_acoshd4_u10(values));
  }
  Vectorized<double> asin() const {
    return Vectorized<double>(Sleef_asind4_u10(values));
  }
  Vectorized<double> atan() const {
    return Vectorized<double>(Sleef_atand4_u10(values));
  }
  Vectorized<double> atanh() const {
    return Vectorized<double>(Sleef_atanhd4_u10(values));
  }
  Vectorized<double> atan2(const Vectorized<double> &b) const {
    return Vectorized<double>(Sleef_atan2d4_u10(values, b));
  }
  Vectorized<double> copysign(const Vectorized<double> &sign) const {
    return Vectorized<double>(Sleef_copysignd4(values, sign));
  }
  Vectorized<double> erf() const {
    return Vectorized<double>(Sleef_erfd4_u10(values));
  }
  Vectorized<double> erfc() const {
    return Vectorized<double>(Sleef_erfcd4_u15(values));
  }
  // Special functions with no Sleef kernel use the scalar calc_* helpers.
  Vectorized<double> erfinv() const {
    return map(calc_erfinv);
  }
  Vectorized<double> exp() const {
    return Vectorized<double>(Sleef_expd4_u10(values));
  }
  Vectorized<double> exp2() const {
    return Vectorized<double>(Sleef_exp2d4_u10(values));
  }
  Vectorized<double> expm1() const {
    return Vectorized<double>(Sleef_expm1d4_u10(values));
  }
  // No fast low-precision exp for double; alias to the accurate one.
  Vectorized<double> exp_u20() const {
    return exp();
  }
  Vectorized<double> fmod(const Vectorized<double>& q) const {
    return Vectorized<double>(Sleef_fmodd4(values, q));
  }
  Vectorized<double> hypot(const Vectorized<double> &b) const {
    return Vectorized<double>(Sleef_hypotd4_u05(values, b));
  }
  Vectorized<double> i0() const {
    return map(calc_i0);
  }
  Vectorized<double> i0e() const {
    return map(calc_i0e);
  }
  Vectorized<double> digamma() const {
    return map(calc_digamma);
  }
  // Binary special functions: spill both operands and apply the scalar
  // helper per lane.
  Vectorized<double> igamma(const Vectorized<double> &x) const {
    __at_align__ double tmp[size()];
    __at_align__ double tmp_x[size()];
    store(tmp);
    x.store(tmp_x);
    for (const auto i : c10::irange(size())) {
      tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
    }
    return loadu(tmp);
  }
  Vectorized<double> igammac(const Vectorized<double> &x) const {
    __at_align__ double tmp[size()];
    __at_align__ double tmp_x[size()];
    store(tmp);
    x.store(tmp_x);
    for (const auto i : c10::irange(size())) {
      tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
    }
    return loadu(tmp);
  }
  Vectorized<double> log() const {
    return Vectorized<double>(Sleef_logd4_u10(values));
  }
  Vectorized<double> log2() const {
    return Vectorized<double>(Sleef_log2d4_u10(values));
  }
  Vectorized<double> log10() const {
    return Vectorized<double>(Sleef_log10d4_u10(values));
  }
  Vectorized<double> log1p() const {
    return Vectorized<double>(Sleef_log1pd4_u10(values));
  }
  Vectorized<double> sin() const {
    return Vectorized<double>(Sleef_sind4_u10(values));
  }
  Vectorized<double> sinh() const {
    return Vectorized<double>(Sleef_sinhd4_u10(values));
  }
  Vectorized<double> cos() const {
    return Vectorized<double>(Sleef_cosd4_u10(values));
  }
  Vectorized<double> cosh() const {
    return Vectorized<double>(Sleef_coshd4_u10(values));
  }
  // Rounding family maps directly onto AVX rounding intrinsics.
  Vectorized<double> ceil() const {
    return _mm256_ceil_pd(values);
  }
  Vectorized<double> floor() const {
    return _mm256_floor_pd(values);
  }
  // Defined out-of-line after operator- is available.
  Vectorized<double> frac() const;
  // Negate by flipping the sign bit (preserves -0.0 semantics).
  Vectorized<double> neg() const {
    return _mm256_xor_pd(_mm256_set1_pd(-0.), values);
  }
  Vectorized<double> nextafter(const Vectorized<double> &b) const {
    return Vectorized<double>(Sleef_nextafterd4(values, b));
  }
  Vectorized<double> round() const {
    return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  }
  Vectorized<double> tan() const {
    return Vectorized<double>(Sleef_tand4_u10(values));
  }
  Vectorized<double> tanh() const {
    return Vectorized<double>(Sleef_tanhd4_u10(values));
  }
  Vectorized<double> trunc() const {
    return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
  }
  Vectorized<double> lgamma() const {
    return Vectorized<double>(Sleef_lgammad4_u10(values));
  }
  Vectorized<double> sqrt() const {
    return _mm256_sqrt_pd(values);
  }
  // Full-precision division is used (no rcp/rsqrt approximation for double).
  Vectorized<double> reciprocal() const {
    return _mm256_div_pd(_mm256_set1_pd(1), values);
  }
  Vectorized<double> rsqrt() const {
    return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values));
  }
  Vectorized<double> pow(const Vectorized<double> &b) const {
    return Vectorized<double>(Sleef_powd4_u10(values, b));
  }
  // Comparison using the _CMP_**_OQ predicate.
  //   `O`: get false if an operand is NaN
  //   `Q`: do not raise if an operand is NaN
  // Results are all-ones / all-zeros bit masks per lane.
  Vectorized<double> operator==(const Vectorized<double>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
  }

  Vectorized<double> operator!=(const Vectorized<double>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
  }

  Vectorized<double> operator<(const Vectorized<double>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ);
  }

  Vectorized<double> operator<=(const Vectorized<double>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ);
  }

  Vectorized<double> operator>(const Vectorized<double>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ);
  }

  Vectorized<double> operator>=(const Vectorized<double>& other) const {
    return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ);
  }

  // Numeric (0.0/1.0) comparison variants; defined out of line.
  Vectorized<double> eq(const Vectorized<double>& other) const;
  Vectorized<double> ne(const Vectorized<double>& other) const;
  Vectorized<double> lt(const Vectorized<double>& other) const;
  Vectorized<double> le(const Vectorized<double>& other) const;
  Vectorized<double> gt(const Vectorized<double>& other) const;
  Vectorized<double> ge(const Vectorized<double>& other) const;
};
|
| 316 |
+
|
| 317 |
+
// Element-wise arithmetic maps directly onto single AVX instructions.
template <>
Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) {
  return _mm256_add_pd(a, b);
}

template <>
Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) {
  return _mm256_sub_pd(a, b);
}

template <>
Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) {
  return _mm256_mul_pd(a, b);
}

template <>
Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) {
  return _mm256_div_pd(a, b);
}
|
| 336 |
+
|
| 337 |
+
// frac. Implement this here so we can use subtraction.
// Fractional part with the sign of the input: x - trunc(x).
inline Vectorized<double> Vectorized<double>::frac() const {
  return *this - this->trunc();
}
|
| 341 |
+
|
| 342 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
// either input is a NaN. (_mm256_max_pd alone would silently prefer the
// non-NaN operand, so the unordered-compare mask is OR-ed back in.)
template <>
Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) {
  Vectorized<double> max = _mm256_max_pd(a, b);
  Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
  // Exploit the fact that all-ones is a NaN.
  return _mm256_or_pd(max, isnan);
}

// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
// either input is a NaN. Same NaN-forcing trick as maximum() above.
template <>
Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) {
  Vectorized<double> min = _mm256_min_pd(a, b);
  Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
  // Exploit the fact that all-ones is a NaN.
  return _mm256_or_pd(min, isnan);
}
|
| 361 |
+
|
| 362 |
+
template <>
|
| 363 |
+
Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) {
|
| 364 |
+
return _mm256_min_pd(max, _mm256_max_pd(min, a));
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
template <>
|
| 368 |
+
Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) {
|
| 369 |
+
return _mm256_max_pd(min, a);
|
| 370 |
+
}
|
| 371 |
+
|
| 372 |
+
template <>
|
| 373 |
+
Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) {
|
| 374 |
+
return _mm256_min_pd(max, a);
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
template <>
|
| 378 |
+
Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) {
|
| 379 |
+
return _mm256_and_pd(a, b);
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
template <>
|
| 383 |
+
Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) {
|
| 384 |
+
return _mm256_or_pd(a, b);
|
| 385 |
+
}
|
| 386 |
+
|
| 387 |
+
template <>
|
| 388 |
+
Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) {
|
| 389 |
+
return _mm256_xor_pd(a, b);
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const {
  // Numeric comparison result: 1.0 where equal, 0.0 elsewhere.
  // ANDing the all-ones comparison mask with 1.0 converts mask -> number.
  const Vectorized<double> one(1.0);
  return (*this == other) & one;
}
|
| 395 |
+
|
| 396 |
+
inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const {
  // Numeric comparison result: 1.0 where not equal, 0.0 elsewhere.
  const Vectorized<double> one(1.0);
  return (*this != other) & one;
}
|
| 399 |
+
|
| 400 |
+
inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const {
  // Numeric comparison result: 1.0 where strictly greater, 0.0 elsewhere.
  const Vectorized<double> one(1.0);
  return (*this > other) & one;
}
|
| 403 |
+
|
| 404 |
+
inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const {
  // Numeric comparison result: 1.0 where greater-or-equal, 0.0 elsewhere.
  const Vectorized<double> one(1.0);
  return (*this >= other) & one;
}
|
| 407 |
+
|
| 408 |
+
inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const {
  // Numeric comparison result: 1.0 where strictly less, 0.0 elsewhere.
  const Vectorized<double> one(1.0);
  return (*this < other) & one;
}
|
| 411 |
+
|
| 412 |
+
inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const {
  // Numeric comparison result: 1.0 where less-or-equal, 0.0 elsewhere.
  const Vectorized<double> one(1.0);
  return (*this <= other) & one;
}
|
| 415 |
+
|
| 416 |
+
template <>
|
| 417 |
+
inline void convert(const double* src, double* dst, int64_t n) {
|
| 418 |
+
int64_t i;
|
| 419 |
+
#pragma unroll
|
| 420 |
+
for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
|
| 421 |
+
_mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i));
|
| 422 |
+
}
|
| 423 |
+
#pragma unroll
|
| 424 |
+
for (; i < n; i++) {
|
| 425 |
+
dst[i] = src[i];
|
| 426 |
+
}
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
#ifdef CPU_CAPABILITY_AVX2
|
| 430 |
+
template <>
|
| 431 |
+
Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
|
| 432 |
+
return _mm256_fmadd_pd(a, b, c);
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
template <>
|
| 436 |
+
Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
|
| 437 |
+
return _mm256_fmsub_pd(a, b, c);
|
| 438 |
+
}
|
| 439 |
+
#endif
|
| 440 |
+
|
| 441 |
+
#endif
|
| 442 |
+
|
| 443 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_half_neon.h
ADDED
|
@@ -0,0 +1,818 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec256/vec256_float_neon.h>
|
| 8 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 9 |
+
#include <c10/util/Half.h>
|
| 10 |
+
#include <c10/util/irange.h>
|
| 11 |
+
|
| 12 |
+
namespace at::vec {
|
| 13 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 14 |
+
inline namespace CPU_CAPABILITY {
|
| 15 |
+
|
| 16 |
+
// Right now contains only aarch64 implementation.
|
| 17 |
+
// Due to follow two reasons aarch32 is not currently supported.
|
| 18 |
+
// 1. Due to difference in ISA been aarch32 and aarch64, intrinsics
|
| 19 |
+
// that work for aarch64 dont work for aarch32.
|
| 20 |
+
// 2. Android NDK r21 has problems with compiling aarch32.
|
| 21 |
+
// Clang seg faults.
|
| 22 |
+
// https://github.com/android/ndk/issues/1248
|
| 23 |
+
// https://bugs.llvm.org/show_bug.cgi?id=45824
|
| 24 |
+
// Most likely we will do aarch32 support with inline asm.
|
| 25 |
+
#if !defined(C10_MOBILE) && defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
|
| 26 |
+
|
| 27 |
+
#ifdef __BIG_ENDIAN__
|
| 28 |
+
#error "Big endian is not supported."
|
| 29 |
+
#endif
|
| 30 |
+
|
| 31 |
+
// Helper for Vectorized<c10::Half>::blend: produces a copy of `res` whose
// lane `index` is taken from `b` when mask_val is true and from `a` when it
// is false. Only the two bool specializations below are defined; the primary
// template is intentionally left undefined.
template <int index, bool mask_val>
struct BlendHalfRegs {
  static float16x8_t impl(
      const float16x8_t& a,
      const float16x8_t& b,
      float16x8_t& res);
};
|
| 38 |
+
|
| 39 |
+
// mask bit set: lane `index` is sourced from `b`.
template <int index>
struct BlendHalfRegs<index, true> {
  static float16x8_t impl(
      const float16x8_t& a,
      const float16x8_t& b,
      float16x8_t& res) {
    // vsetq_lane_f16 returns the updated register; `res` itself is not
    // modified in place — the caller assigns the return value back.
    return vsetq_lane_f16(vgetq_lane_f16(b, index), res, index);
  }
};
|
| 48 |
+
|
| 49 |
+
// mask bit clear: lane `index` is sourced from `a`.
template <int index>
struct BlendHalfRegs<index, false> {
  static float16x8_t impl(
      const float16x8_t& a,
      const float16x8_t& b,
      float16x8_t& res) {
    // vsetq_lane_f16 returns the updated register; `res` itself is not
    // modified in place — the caller assigns the return value back.
    return vsetq_lane_f16(vgetq_lane_f16(a, index), res, index);
  }
};
|
| 58 |
+
|
| 59 |
+
// On ARM, Half type supports float16_t->Half constructor and Half->float16_t
|
| 60 |
+
// conversion
|
| 61 |
+
template <>
|
| 62 |
+
class Vectorized<c10::Half> {
|
| 63 |
+
private:
|
| 64 |
+
float16x8x2_t values;
|
| 65 |
+
|
| 66 |
+
public:
|
| 67 |
+
// value_type should be c10::Half to fit interface with vec_base.h
|
| 68 |
+
using value_type = c10::Half;
|
| 69 |
+
using size_type = int;
|
| 70 |
+
static constexpr size_type size() {
|
| 71 |
+
static_assert(sizeof(float16x8x2_t) == 16 * sizeof(value_type));
|
| 72 |
+
return 16;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
private:
|
| 76 |
+
// We use these private map functions to implement various methods
|
| 77 |
+
Vectorized<c10::Half> map2(
|
| 78 |
+
const Vectorized<c10::Half>& second,
|
| 79 |
+
c10::Half (*const f)(c10::Half, c10::Half)) const {
|
| 80 |
+
__at_align__ c10::Half tmp_first[size()];
|
| 81 |
+
__at_align__ c10::Half tmp_second[size()];
|
| 82 |
+
store(tmp_first); // store this to tmp_first
|
| 83 |
+
second.store(tmp_second);
|
| 84 |
+
for (const auto i : c10::irange(size())) {
|
| 85 |
+
tmp_first[i] = f(tmp_first[i], tmp_second[i]);
|
| 86 |
+
}
|
| 87 |
+
return loadu(tmp_first);
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
Vectorized<c10::Half> map_with_vec_float_method(
|
| 91 |
+
Vectorized<float> (Vectorized<float>::*m)() const) const {
|
| 92 |
+
// Convert low float16x8_t to 2 float32x4_t variables, apply m, and convert
|
| 93 |
+
// back
|
| 94 |
+
float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values.val[0]));
|
| 95 |
+
float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values.val[0]));
|
| 96 |
+
Vectorized<float> mv0 = (Vectorized<float>(v00, v01).*m)();
|
| 97 |
+
float16x4_t r00 = vcvt_f16_f32(mv0.get_low());
|
| 98 |
+
float16x4_t r01 = vcvt_f16_f32(mv0.get_high());
|
| 99 |
+
|
| 100 |
+
// Convert high float16x8_t to 2 float32x4_t variables, apply m, and convert
|
| 101 |
+
// back
|
| 102 |
+
float32x4_t v10 = vcvt_f32_f16(vget_low_f16(values.val[1]));
|
| 103 |
+
float32x4_t v11 = vcvt_f32_f16(vget_high_f16(values.val[1]));
|
| 104 |
+
Vectorized<float> mv1 = (Vectorized<float>(v10, v11).*m)();
|
| 105 |
+
float16x4_t r10 = vcvt_f16_f32(mv1.get_low());
|
| 106 |
+
float16x4_t r11 = vcvt_f16_f32(mv1.get_high());
|
| 107 |
+
|
| 108 |
+
// Pack result into Vectorized<c10::Half>
|
| 109 |
+
return Vectorized<c10::Half>(
|
| 110 |
+
vcombine_f16(r00, r01), vcombine_f16(r10, r11));
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
Vectorized<c10::Half> map2_with_vec_float_method(
|
| 114 |
+
const Vectorized<c10::Half>& second,
|
| 115 |
+
Vectorized<float> (Vectorized<float>::*m)(const Vectorized<float>&)
|
| 116 |
+
const) const {
|
| 117 |
+
// Convert low float16x8_t to 2 float32x4_t variables, apply m, and convert
|
| 118 |
+
// back
|
| 119 |
+
float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values.val[0]));
|
| 120 |
+
float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values.val[0]));
|
| 121 |
+
float32x4_t second_v00 = vcvt_f32_f16(vget_low_f16(second.get_low()));
|
| 122 |
+
float32x4_t second_v01 = vcvt_f32_f16(vget_high_f16(second.get_low()));
|
| 123 |
+
Vectorized<float> mv0 = (Vectorized<float>(v00, v01).*m)(
|
| 124 |
+
Vectorized<float>(second_v00, second_v01));
|
| 125 |
+
float16x4_t r00 = vcvt_f16_f32(mv0.get_low());
|
| 126 |
+
float16x4_t r01 = vcvt_f16_f32(mv0.get_high());
|
| 127 |
+
|
| 128 |
+
// Convert high float16x8_t to 2 float32x4_t variables, apply m, and convert
|
| 129 |
+
// back
|
| 130 |
+
float32x4_t v10 = vcvt_f32_f16(vget_low_f16(values.val[1]));
|
| 131 |
+
float32x4_t v11 = vcvt_f32_f16(vget_high_f16(values.val[1]));
|
| 132 |
+
float32x4_t second_v10 = vcvt_f32_f16(vget_low_f16(second.get_high()));
|
| 133 |
+
float32x4_t second_v11 = vcvt_f32_f16(vget_high_f16(second.get_high()));
|
| 134 |
+
Vectorized<float> mv1 = (Vectorized<float>(v10, v11).*m)(
|
| 135 |
+
Vectorized<float>(second_v10, second_v11));
|
| 136 |
+
float16x4_t r10 = vcvt_f16_f32(mv1.get_low());
|
| 137 |
+
float16x4_t r11 = vcvt_f16_f32(mv1.get_high());
|
| 138 |
+
|
| 139 |
+
// Pack result into Vectorized<c10::Half>
|
| 140 |
+
return Vectorized<c10::Half>(
|
| 141 |
+
vcombine_f16(r00, r01), vcombine_f16(r10, r11));
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
public:
|
| 145 |
+
// constructor
|
| 146 |
+
Vectorized() {}
|
| 147 |
+
Vectorized(float16x8x2_t v) : values(v) {}
|
| 148 |
+
|
| 149 |
+
// A ctor that accepts c10::Half is needed to fit interface with vec_base.h
|
| 150 |
+
// A second constructor that takes float16_t is also included
|
| 151 |
+
Vectorized(c10::Half val)
|
| 152 |
+
: values{vdupq_n_f16((float16_t)val), vdupq_n_f16((float16_t)val)} {
|
| 153 |
+
}
|
| 154 |
+
Vectorized(float16_t val) : values{vdupq_n_f16(val), vdupq_n_f16(val)} {}
|
| 155 |
+
Vectorized(
|
| 156 |
+
float16_t val0,
|
| 157 |
+
float16_t val1,
|
| 158 |
+
float16_t val2,
|
| 159 |
+
float16_t val3,
|
| 160 |
+
float16_t val4,
|
| 161 |
+
float16_t val5,
|
| 162 |
+
float16_t val6,
|
| 163 |
+
float16_t val7,
|
| 164 |
+
float16_t val8,
|
| 165 |
+
float16_t val9,
|
| 166 |
+
float16_t val10,
|
| 167 |
+
float16_t val11,
|
| 168 |
+
float16_t val12,
|
| 169 |
+
float16_t val13,
|
| 170 |
+
float16_t val14,
|
| 171 |
+
float16_t val15)
|
| 172 |
+
: values{
|
| 173 |
+
val0,
|
| 174 |
+
val1,
|
| 175 |
+
val2,
|
| 176 |
+
val3,
|
| 177 |
+
val4,
|
| 178 |
+
val5,
|
| 179 |
+
val6,
|
| 180 |
+
val7,
|
| 181 |
+
val8,
|
| 182 |
+
val9,
|
| 183 |
+
val10,
|
| 184 |
+
val11,
|
| 185 |
+
val12,
|
| 186 |
+
val13,
|
| 187 |
+
val14,
|
| 188 |
+
val15} {}
|
| 189 |
+
Vectorized(float16x8_t val0, float16x8_t val1) : values{val0, val1} {}
|
| 190 |
+
operator float16x8x2_t() const {
|
| 191 |
+
return values;
|
| 192 |
+
}
|
| 193 |
+
template <int64_t mask>
static Vectorized<c10::Half> blend(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
  // Compile-time per-lane select over all 16 fp16 lanes: lane i comes from
  // `b` when bit i of `mask` is set, otherwise from `a`.
  Vectorized<c10::Half> vec;
  // Low register: lanes 0-7 are selected by mask bits 0x01..0x80.
  vec.values.val[0] = BlendHalfRegs<0, (mask & 0x01) != 0>::impl(
      a.values.val[0], b.values.val[0], vec.values.val[0]);
  vec.values.val[0] = BlendHalfRegs<1, (mask & 0x02) != 0>::impl(
      a.values.val[0], b.values.val[0], vec.values.val[0]);
  vec.values.val[0] = BlendHalfRegs<2, (mask & 0x04) != 0>::impl(
      a.values.val[0], b.values.val[0], vec.values.val[0]);
  vec.values.val[0] = BlendHalfRegs<3, (mask & 0x08) != 0>::impl(
      a.values.val[0], b.values.val[0], vec.values.val[0]);

  vec.values.val[0] = BlendHalfRegs<4, (mask & 0x10) != 0>::impl(
      a.values.val[0], b.values.val[0], vec.values.val[0]);
  vec.values.val[0] = BlendHalfRegs<5, (mask & 0x20) != 0>::impl(
      a.values.val[0], b.values.val[0], vec.values.val[0]);
  vec.values.val[0] = BlendHalfRegs<6, (mask & 0x40) != 0>::impl(
      a.values.val[0], b.values.val[0], vec.values.val[0]);
  vec.values.val[0] = BlendHalfRegs<7, (mask & 0x80) != 0>::impl(
      a.values.val[0], b.values.val[0], vec.values.val[0]);

  // High register: lanes 8-15 are selected by mask bits 0x100..0x8000.
  // BUG FIX: the previous code re-tested bits 0x10..0x80 here (a copy-paste
  // from the 8-lane float version), so lanes 8-15 mirrored the selection of
  // lanes 4-7 and bits 8-15 of `mask` were ignored.
  vec.values.val[1] = BlendHalfRegs<0, (mask & 0x100) != 0>::impl(
      a.values.val[1], b.values.val[1], vec.values.val[1]);
  vec.values.val[1] = BlendHalfRegs<1, (mask & 0x200) != 0>::impl(
      a.values.val[1], b.values.val[1], vec.values.val[1]);
  vec.values.val[1] = BlendHalfRegs<2, (mask & 0x400) != 0>::impl(
      a.values.val[1], b.values.val[1], vec.values.val[1]);
  vec.values.val[1] = BlendHalfRegs<3, (mask & 0x800) != 0>::impl(
      a.values.val[1], b.values.val[1], vec.values.val[1]);

  vec.values.val[1] = BlendHalfRegs<4, (mask & 0x1000) != 0>::impl(
      a.values.val[1], b.values.val[1], vec.values.val[1]);
  vec.values.val[1] = BlendHalfRegs<5, (mask & 0x2000) != 0>::impl(
      a.values.val[1], b.values.val[1], vec.values.val[1]);
  vec.values.val[1] = BlendHalfRegs<6, (mask & 0x4000) != 0>::impl(
      a.values.val[1], b.values.val[1], vec.values.val[1]);
  vec.values.val[1] = BlendHalfRegs<7, (mask & 0x8000) != 0>::impl(
      a.values.val[1], b.values.val[1], vec.values.val[1]);

  return vec;
}
|
| 238 |
+
// Runtime per-lane select: where a mask lane is all-ones take the lane from
// `b`, where it is all-zeros take it from `a`.
static Vectorized<c10::Half> blendv(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b,
    const Vectorized<c10::Half>& mask) {
  // Note: using blendv is very awkward because 0xFFFF is one of many NaN's in
  // FP16. It's unfortunate that the mask has type Half (required from
  // vec_base).

  // TODO
  // NB: This requires that each value, i.e., each uint value,
  // of the mask either all be zeros or all be 1s.
  // We perhaps need some kind of an assert?
  // But that will affect performance.
  Vectorized<c10::Half> vec(mask.values);
  // vbslq_f16 is a bitwise select: (mask & b) | (~mask & a), so a partial
  // mask lane would mix bits of the two inputs — hence the precondition above.
  vec.values.val[0] = vbslq_f16(
      vreinterpretq_u16_f16(vec.values.val[0]),
      b.values.val[0],
      a.values.val[0]);
  vec.values.val[1] = vbslq_f16(
      vreinterpretq_u16_f16(vec.values.val[1]),
      b.values.val[1],
      a.values.val[1]);
  return vec;
}
|
| 262 |
+
template <typename step_t>
|
| 263 |
+
static Vectorized<c10::Half> arange(
|
| 264 |
+
c10::Half base = 0.0,
|
| 265 |
+
step_t step = static_cast<step_t>(1)) {
|
| 266 |
+
const Vectorized<c10::Half> base_vec(base);
|
| 267 |
+
const Vectorized<c10::Half> step_vec(step);
|
| 268 |
+
const Vectorized<c10::Half> step_sizes(
|
| 269 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
|
| 270 |
+
return fmadd(step_sizes, step_vec, base_vec);
|
| 271 |
+
}
|
| 272 |
+
static Vectorized<c10::Half> set(
|
| 273 |
+
const Vectorized<c10::Half>& a,
|
| 274 |
+
const Vectorized<c10::Half>& b,
|
| 275 |
+
int64_t count = size()) {
|
| 276 |
+
uint16_t pre_mask[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
|
| 277 |
+
for (int i = 0; i < count; i++) {
|
| 278 |
+
pre_mask[i] = 0xFFFF;
|
| 279 |
+
}
|
| 280 |
+
uint16x8x2_t mask = vld1q_u16_x2(pre_mask);
|
| 281 |
+
|
| 282 |
+
// Using blendv is awkward because 0xFFFF is one of many NaN's in FP16
|
| 283 |
+
// so we directly use vbslq_f16 instead
|
| 284 |
+
Vectorized<c10::Half> vec(
|
| 285 |
+
vbslq_f16(
|
| 286 |
+
// Low bits
|
| 287 |
+
mask.val[0],
|
| 288 |
+
b.values.val[0],
|
| 289 |
+
a.values.val[0]),
|
| 290 |
+
// High bits
|
| 291 |
+
vbslq_f16(mask.val[1], b.values.val[1], a.values.val[1]));
|
| 292 |
+
|
| 293 |
+
return vec;
|
| 294 |
+
}
|
| 295 |
+
// Load up to 16 fp16 values from `ptr`; lanes at index >= count are zeroed.
static Vectorized<c10::Half> loadu(const void* ptr, int64_t count = size()) {
  if (count == size()) {
    // Fast path: full 2x8-lane load.
    return vld1q_f16_x2(reinterpret_cast<const float16_t*>(ptr));
  } else if (count == (size() >> 1)) {
    // Half load: fill the low register, zero the high register.
    Vectorized<c10::Half> res;
    res.values.val[0] = vld1q_f16(reinterpret_cast<const float16_t*>(ptr));
    std::memset(&res.values.val[1], 0, sizeof(res.values.val[1]));
    return res;
  }
  // General path: zero-initialize an aligned stack buffer, copy `count`
  // elements into it, then load the full buffer.
  __at_align__ float16_t tmp_values[size()];
  for (const auto i : c10::irange(size())) {
    tmp_values[i] = 0;
  }
  std::memcpy(
      tmp_values,
      reinterpret_cast<const float16_t*>(ptr),
      count * sizeof(float16_t));
  return vld1q_f16_x2(reinterpret_cast<const float16_t*>(tmp_values));
}
|
| 314 |
+
void store(void* ptr, int64_t count = size()) const {
|
| 315 |
+
if (count == size()) {
|
| 316 |
+
vst1q_f16_x2(reinterpret_cast<float16_t*>(ptr), values);
|
| 317 |
+
return;
|
| 318 |
+
} else if (count == (size() >> 1)) {
|
| 319 |
+
vst1q_f16(reinterpret_cast<float16_t*>(ptr), values.val[0]);
|
| 320 |
+
} else {
|
| 321 |
+
float16_t tmp_values[size()];
|
| 322 |
+
vst1q_f16_x2(reinterpret_cast<float16_t*>(tmp_values), values);
|
| 323 |
+
std::memcpy(ptr, tmp_values, count * sizeof(float16_t));
|
| 324 |
+
}
|
| 325 |
+
}
|
| 326 |
+
inline const float16x8_t& get_low() const {
|
| 327 |
+
return values.val[0];
|
| 328 |
+
}
|
| 329 |
+
inline float16x8_t& get_low() {
|
| 330 |
+
return values.val[0];
|
| 331 |
+
}
|
| 332 |
+
inline const float16x8_t& get_high() const {
|
| 333 |
+
return values.val[1];
|
| 334 |
+
}
|
| 335 |
+
inline float16x8_t& get_high() {
|
| 336 |
+
return values.val[1];
|
| 337 |
+
}
|
| 338 |
+
// Very slow implementation of indexing.
|
| 339 |
+
// Only required because vec256_qint refers to this.
|
| 340 |
+
// Once we specialize that implementation for ARM
|
| 341 |
+
// this should be removed. TODO (kimishpatel)
|
| 342 |
+
c10::Half operator[](int idx) const {
|
| 343 |
+
__at_align__ c10::Half tmp[size()];
|
| 344 |
+
store(tmp);
|
| 345 |
+
return tmp[idx];
|
| 346 |
+
}
|
| 347 |
+
c10::Half operator[](int idx) {
|
| 348 |
+
__at_align__ c10::Half tmp[size()];
|
| 349 |
+
store(tmp);
|
| 350 |
+
return tmp[idx];
|
| 351 |
+
}
|
| 352 |
+
// For boolean version where we want to if any 1/all zero
|
| 353 |
+
// etc. can be done faster in a different way.
|
| 354 |
+
int zero_mask() const {
|
| 355 |
+
__at_align__ c10::Half tmp[size()];
|
| 356 |
+
store(tmp);
|
| 357 |
+
int mask = 0;
|
| 358 |
+
for (int i = 0; i < size(); ++i) {
|
| 359 |
+
if (tmp[i] == 0) {
|
| 360 |
+
mask |= (1 << i);
|
| 361 |
+
}
|
| 362 |
+
}
|
| 363 |
+
return mask;
|
| 364 |
+
}
|
| 365 |
+
Vectorized<c10::Half> isnan() const {
|
| 366 |
+
__at_align__ c10::Half tmp[size()];
|
| 367 |
+
__at_align__ c10::Half res[size()];
|
| 368 |
+
store(tmp);
|
| 369 |
+
for (const auto i : c10::irange(size())) {
|
| 370 |
+
if (_isnan(tmp[i])) {
|
| 371 |
+
std::memset(static_cast<void*>(&res[i]), 0xFF, sizeof(c10::Half));
|
| 372 |
+
} else {
|
| 373 |
+
std::memset(static_cast<void*>(&res[i]), 0, sizeof(c10::Half));
|
| 374 |
+
}
|
| 375 |
+
}
|
| 376 |
+
return loadu(res);
|
| 377 |
+
};
|
| 378 |
+
bool has_inf_nan() const {
|
| 379 |
+
__at_align__ c10::Half tmp[size()];
|
| 380 |
+
store(tmp);
|
| 381 |
+
for (const auto i : c10::irange(size())) {
|
| 382 |
+
if (_isnan(tmp[i]) || _isinf(tmp[i])) {
|
| 383 |
+
return true;
|
| 384 |
+
}
|
| 385 |
+
}
|
| 386 |
+
return false;
|
| 387 |
+
}
|
| 388 |
+
Vectorized<c10::Half> map(c10::Half (*const f)(c10::Half)) const {
|
| 389 |
+
__at_align__ c10::Half tmp[size()];
|
| 390 |
+
store(tmp);
|
| 391 |
+
for (const auto i : c10::irange(size())) {
|
| 392 |
+
tmp[i] = f(tmp[i]);
|
| 393 |
+
}
|
| 394 |
+
return loadu(tmp);
|
| 395 |
+
}
|
| 396 |
+
Vectorized<c10::Half> abs() const {
|
| 397 |
+
return Vectorized<c10::Half>(
|
| 398 |
+
vabsq_f16(values.val[0]), vabsq_f16(values.val[1]));
|
| 399 |
+
}
|
| 400 |
+
Vectorized<c10::Half> angle() const {
|
| 401 |
+
auto zero = Vectorized<c10::Half>(0);
|
| 402 |
+
auto pi = Vectorized<c10::Half>(c10::pi<c10::Half>);
|
| 403 |
+
auto tmp = blendv(zero, pi, *this < zero);
|
| 404 |
+
return blendv(tmp, *this, isnan());
|
| 405 |
+
}
|
| 406 |
+
Vectorized<c10::Half> real() const {
|
| 407 |
+
return *this;
|
| 408 |
+
}
|
| 409 |
+
Vectorized<c10::Half> imag() const {
|
| 410 |
+
return Vectorized<c10::Half>(0);
|
| 411 |
+
}
|
| 412 |
+
Vectorized<c10::Half> conj() const {
|
| 413 |
+
return *this;
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
// Sleef does not support FP16, so many math functions are applied by
|
| 417 |
+
// converting to FP32, applying the math function, and then converting back to
|
| 418 |
+
// FP16.
|
| 419 |
+
Vectorized<c10::Half> acos() const {
|
| 420 |
+
return map_with_vec_float_method(&Vectorized<float>::acos);
|
| 421 |
+
}
|
| 422 |
+
Vectorized<c10::Half> acosh() const {
|
| 423 |
+
return map_with_vec_float_method(&Vectorized<float>::acosh);
|
| 424 |
+
}
|
| 425 |
+
Vectorized<c10::Half> asin() const {
|
| 426 |
+
return map_with_vec_float_method(&Vectorized<float>::asin);
|
| 427 |
+
}
|
| 428 |
+
Vectorized<c10::Half> atan() const {
|
| 429 |
+
return map_with_vec_float_method(&Vectorized<float>::atan);
|
| 430 |
+
}
|
| 431 |
+
Vectorized<c10::Half> atanh() const {
|
| 432 |
+
return map_with_vec_float_method(&Vectorized<float>::atanh);
|
| 433 |
+
}
|
| 434 |
+
Vectorized<c10::Half> atan2(const Vectorized<c10::Half>& exp) const {
|
| 435 |
+
return map2_with_vec_float_method(exp, &Vectorized<float>::atan2);
|
| 436 |
+
}
|
| 437 |
+
Vectorized<c10::Half> copysign(const Vectorized<c10::Half>& sign) const {
|
| 438 |
+
return map2_with_vec_float_method(sign, &Vectorized<float>::copysign);
|
| 439 |
+
}
|
| 440 |
+
Vectorized<c10::Half> erf() const {
|
| 441 |
+
return map_with_vec_float_method(&Vectorized<float>::erf);
|
| 442 |
+
}
|
| 443 |
+
Vectorized<c10::Half> erfc() const {
|
| 444 |
+
return map_with_vec_float_method(&Vectorized<float>::erfc);
|
| 445 |
+
}
|
| 446 |
+
Vectorized<c10::Half> erfinv() const {
|
| 447 |
+
return map_with_vec_float_method(&Vectorized<float>::erfinv);
|
| 448 |
+
}
|
| 449 |
+
Vectorized<c10::Half> exp() const {
|
| 450 |
+
return map_with_vec_float_method(&Vectorized<float>::exp);
|
| 451 |
+
}
|
| 452 |
+
Vectorized<c10::Half> exp2() const {
|
| 453 |
+
return map_with_vec_float_method(&Vectorized<float>::exp2);
|
| 454 |
+
}
|
| 455 |
+
Vectorized<c10::Half> expm1() const {
|
| 456 |
+
return map_with_vec_float_method(&Vectorized<float>::expm1);
|
| 457 |
+
}
|
| 458 |
+
Vectorized<c10::Half> exp_u20() const {
|
| 459 |
+
return map_with_vec_float_method(&Vectorized<float>::exp_u20);
|
| 460 |
+
}
|
| 461 |
+
Vectorized<c10::Half> fmod(const Vectorized<c10::Half>& q) const {
|
| 462 |
+
// This function is questionable with a conversion, so we use map2
|
| 463 |
+
return map2(q, std::fmod);
|
| 464 |
+
}
|
| 465 |
+
Vectorized<c10::Half> hypot(const Vectorized<c10::Half>& b) const {
|
| 466 |
+
return map2_with_vec_float_method(b, &Vectorized<float>::hypot);
|
| 467 |
+
}
|
| 468 |
+
Vectorized<c10::Half> i0() const {
|
| 469 |
+
return map_with_vec_float_method(&Vectorized<float>::i0);
|
| 470 |
+
}
|
| 471 |
+
Vectorized<c10::Half> i0e() const {
|
| 472 |
+
return map_with_vec_float_method(&Vectorized<float>::i0e);
|
| 473 |
+
}
|
| 474 |
+
Vectorized<c10::Half> digamma() const {
|
| 475 |
+
return map_with_vec_float_method(&Vectorized<float>::digamma);
|
| 476 |
+
}
|
| 477 |
+
Vectorized<c10::Half> igamma(const Vectorized<c10::Half>& x) const {
|
| 478 |
+
return map2_with_vec_float_method(x, &Vectorized<float>::igamma);
|
| 479 |
+
}
|
| 480 |
+
Vectorized<c10::Half> igammac(const Vectorized<c10::Half>& x) const {
|
| 481 |
+
return map2_with_vec_float_method(x, &Vectorized<float>::igammac);
|
| 482 |
+
}
|
| 483 |
+
Vectorized<c10::Half> log() const {
|
| 484 |
+
return map_with_vec_float_method(&Vectorized<float>::log);
|
| 485 |
+
}
|
| 486 |
+
Vectorized<c10::Half> log10() const {
|
| 487 |
+
return map_with_vec_float_method(&Vectorized<float>::log10);
|
| 488 |
+
}
|
| 489 |
+
Vectorized<c10::Half> log1p() const {
|
| 490 |
+
return map_with_vec_float_method(&Vectorized<float>::log1p);
|
| 491 |
+
}
|
| 492 |
+
Vectorized<c10::Half> log2() const {
|
| 493 |
+
return map_with_vec_float_method(&Vectorized<float>::log2);
|
| 494 |
+
}
|
| 495 |
+
Vectorized<c10::Half> nextafter(const Vectorized<c10::Half>& b) const {
|
| 496 |
+
// This function does not make sense with conversion, so we use map2
|
| 497 |
+
return map2(b, std::nextafter);
|
| 498 |
+
}
|
| 499 |
+
Vectorized<c10::Half> frac() const;
|
| 500 |
+
Vectorized<c10::Half> sin() const {
|
| 501 |
+
return map_with_vec_float_method(&Vectorized<float>::sin);
|
| 502 |
+
}
|
| 503 |
+
Vectorized<c10::Half> sinh() const {
|
| 504 |
+
return map_with_vec_float_method(&Vectorized<float>::sinh);
|
| 505 |
+
}
|
| 506 |
+
Vectorized<c10::Half> cos() const {
|
| 507 |
+
return map_with_vec_float_method(&Vectorized<float>::cos);
|
| 508 |
+
}
|
| 509 |
+
Vectorized<c10::Half> cosh() const {
|
| 510 |
+
return map_with_vec_float_method(&Vectorized<float>::cosh);
|
| 511 |
+
}
|
| 512 |
+
Vectorized<c10::Half> ceil() const {
|
| 513 |
+
// This function is questionable with a conversion, so we use map
|
| 514 |
+
return map(at::native::ceil_impl);
|
| 515 |
+
}
|
| 516 |
+
// Elementwise floor via the scalar floor_impl; stays at half precision
// (see ceil() for the same rationale).
Vectorized<c10::Half> floor() const {
  // This function is questionable with a conversion, so we use map
  return map(at::native::floor_impl);
}
|
| 520 |
+
// Elementwise negation (sign-bit flip) on both 8-lane halves via vnegq_f16.
Vectorized<c10::Half> neg() const {
  return Vectorized<c10::Half>(
      vnegq_f16(values.val[0]), vnegq_f16(values.val[1]));
}
|
| 524 |
+
// Elementwise round via the scalar round_impl; stays at half precision
// so the rounding decision is made on the half value itself.
inline Vectorized<c10::Half> round() const {
  // This function is questionable with a conversion, so we use map
  return map(at::native::round_impl);
}
|
| 528 |
+
// Elementwise tangent; computed in float precision, narrowed back.
inline Vectorized<c10::Half> tan() const {
  return map_with_vec_float_method(&Vectorized<float>::tan);
}
|
| 531 |
+
// Elementwise hyperbolic tangent; computed in float precision, narrowed back.
inline Vectorized<c10::Half> tanh() const {
  return map_with_vec_float_method(&Vectorized<float>::tanh);
}
|
| 534 |
+
// Elementwise truncation (round toward zero) via vrndq_f16, applied to
// both 8-lane halves of the register pair.
Vectorized<c10::Half> trunc() const {
  return Vectorized<c10::Half>(
      vrndq_f16(values.val[0]), vrndq_f16(values.val[1]));
}
|
| 539 |
+
// Elementwise log-gamma; computed in float precision, narrowed back.
Vectorized<c10::Half> lgamma() const {
  return map_with_vec_float_method(&Vectorized<float>::lgamma);
}
|
| 542 |
+
// Elementwise square root directly at half precision (vsqrtq_f16 is an
// exact IEEE sqrt, not an estimate).
Vectorized<c10::Half> sqrt() const {
  return Vectorized<c10::Half>(
      vsqrtq_f16(values.val[0]), vsqrtq_f16(values.val[1]));
}
|
| 546 |
+
// Elementwise 1/x using an exact division (vdivq_f16) rather than the
// NEON reciprocal-estimate instructions, trading speed for accuracy.
Vectorized<c10::Half> reciprocal() const {
  auto ones = vdupq_n_f16(1.0f);
  auto r0 = vdivq_f16(ones, values.val[0]);
  auto r1 = vdivq_f16(ones, values.val[1]);
  return Vectorized<c10::Half>(r0, r1);
}
|
| 552 |
+
// Elementwise 1/sqrt(x), composed from the exact sqrt and reciprocal above.
Vectorized<c10::Half> rsqrt() const {
  return this->sqrt().reciprocal();
}
|
| 555 |
+
// Elementwise this^exp; computed in float precision, narrowed back.
Vectorized<c10::Half> pow(const Vectorized<c10::Half>& exp) const {
  return map2_with_vec_float_method(exp, &Vectorized<float>::pow);
}
|
| 558 |
+
// Elementwise equality. Produces an all-ones / all-zeros 16-bit mask per
// lane (vceqq_f16), reinterpreted back as half bit patterns.
Vectorized<c10::Half> operator==(const Vectorized<c10::Half>& other) const {
  float16x8_t r0 =
      vreinterpretq_f16_u16(vceqq_f16(values.val[0], other.values.val[0]));
  float16x8_t r1 =
      vreinterpretq_f16_u16(vceqq_f16(values.val[1], other.values.val[1]));
  return Vectorized<c10::Half>(r0, r1);
}
|
| 565 |
+
|
| 566 |
+
// Elementwise inequality. NEON has no "compare not equal", so the equality
// mask is bitwise-inverted with vmvnq_u16.
Vectorized<c10::Half> operator!=(const Vectorized<c10::Half>& other) const {
  float16x8_t r0 = vreinterpretq_f16_u16(
      vmvnq_u16(vceqq_f16(values.val[0], other.values.val[0])));
  float16x8_t r1 = vreinterpretq_f16_u16(
      vmvnq_u16(vceqq_f16(values.val[1], other.values.val[1])));
  return Vectorized<c10::Half>(r0, r1);
}
|
| 573 |
+
|
| 574 |
+
// Elementwise less-than, returning per-lane all-ones/all-zeros masks.
Vectorized<c10::Half> operator<(const Vectorized<c10::Half>& other) const {
  float16x8_t r0 =
      vreinterpretq_f16_u16(vcltq_f16(values.val[0], other.values.val[0]));
  float16x8_t r1 =
      vreinterpretq_f16_u16(vcltq_f16(values.val[1], other.values.val[1]));
  return Vectorized<c10::Half>(r0, r1);
}
|
| 581 |
+
|
| 582 |
+
// Elementwise less-or-equal, returning per-lane all-ones/all-zeros masks.
Vectorized<c10::Half> operator<=(const Vectorized<c10::Half>& other) const {
  float16x8_t r0 =
      vreinterpretq_f16_u16(vcleq_f16(values.val[0], other.values.val[0]));
  float16x8_t r1 =
      vreinterpretq_f16_u16(vcleq_f16(values.val[1], other.values.val[1]));
  return Vectorized<c10::Half>(r0, r1);
}
|
| 589 |
+
|
| 590 |
+
// Elementwise greater-than, returning per-lane all-ones/all-zeros masks.
Vectorized<c10::Half> operator>(const Vectorized<c10::Half>& other) const {
  float16x8_t r0 =
      vreinterpretq_f16_u16(vcgtq_f16(values.val[0], other.values.val[0]));
  float16x8_t r1 =
      vreinterpretq_f16_u16(vcgtq_f16(values.val[1], other.values.val[1]));
  return Vectorized<c10::Half>(r0, r1);
}
|
| 597 |
+
|
| 598 |
+
// Elementwise greater-or-equal, returning per-lane all-ones/all-zeros masks.
Vectorized<c10::Half> operator>=(const Vectorized<c10::Half>& other) const {
  float16x8_t r0 =
      vreinterpretq_f16_u16(vcgeq_f16(values.val[0], other.values.val[0]));
  float16x8_t r1 =
      vreinterpretq_f16_u16(vcgeq_f16(values.val[1], other.values.val[1]));
  return Vectorized<c10::Half>(r0, r1);
}
|
| 605 |
+
|
| 606 |
+
Vectorized<c10::Half> eq(const Vectorized<c10::Half>& other) const;
|
| 607 |
+
Vectorized<c10::Half> ne(const Vectorized<c10::Half>& other) const;
|
| 608 |
+
Vectorized<c10::Half> gt(const Vectorized<c10::Half>& other) const;
|
| 609 |
+
Vectorized<c10::Half> ge(const Vectorized<c10::Half>& other) const;
|
| 610 |
+
Vectorized<c10::Half> lt(const Vectorized<c10::Half>& other) const;
|
| 611 |
+
Vectorized<c10::Half> le(const Vectorized<c10::Half>& other) const;
|
| 612 |
+
}; // Vectorized<Half>
|
| 613 |
+
|
| 614 |
+
template <>
|
| 615 |
+
Vectorized<c10::Half> inline operator+(
|
| 616 |
+
const Vectorized<c10::Half>& a,
|
| 617 |
+
const Vectorized<c10::Half>& b) {
|
| 618 |
+
float16x8_t r0 = vaddq_f16(a.get_low(), b.get_low());
|
| 619 |
+
float16x8_t r1 = vaddq_f16(a.get_high(), b.get_high());
|
| 620 |
+
return Vectorized<c10::Half>(r0, r1);
|
| 621 |
+
}
|
| 622 |
+
|
| 623 |
+
template <>
|
| 624 |
+
Vectorized<c10::Half> inline operator-(
|
| 625 |
+
const Vectorized<c10::Half>& a,
|
| 626 |
+
const Vectorized<c10::Half>& b) {
|
| 627 |
+
float16x8_t r0 = vsubq_f16(a.get_low(), b.get_low());
|
| 628 |
+
float16x8_t r1 = vsubq_f16(a.get_high(), b.get_high());
|
| 629 |
+
return Vectorized<c10::Half>(r0, r1);
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
template <>
|
| 633 |
+
Vectorized<c10::Half> inline operator*(
|
| 634 |
+
const Vectorized<c10::Half>& a,
|
| 635 |
+
const Vectorized<c10::Half>& b) {
|
| 636 |
+
float16x8_t r0 = vmulq_f16(a.get_low(), b.get_low());
|
| 637 |
+
float16x8_t r1 = vmulq_f16(a.get_high(), b.get_high());
|
| 638 |
+
return Vectorized<c10::Half>(r0, r1);
|
| 639 |
+
}
|
| 640 |
+
|
| 641 |
+
template <>
|
| 642 |
+
Vectorized<c10::Half> inline operator/(
|
| 643 |
+
const Vectorized<c10::Half>& a,
|
| 644 |
+
const Vectorized<c10::Half>& b) {
|
| 645 |
+
float16x8_t r0 = vdivq_f16(a.get_low(), b.get_low());
|
| 646 |
+
float16x8_t r1 = vdivq_f16(a.get_high(), b.get_high());
|
| 647 |
+
return Vectorized<c10::Half>(r0, r1);
|
| 648 |
+
}
|
| 649 |
+
|
| 650 |
+
// frac. Implement this here so we can use subtraction
|
| 651 |
+
// frac. Implement this here so we can use subtraction
// Fractional part: x - trunc(x); keeps the sign of x.
inline Vectorized<c10::Half> Vectorized<c10::Half>::frac() const {
  return *this - this->trunc();
}
|
| 654 |
+
|
| 655 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 656 |
+
// either input is a NaN.
|
| 657 |
+
// Lane-wise maximum. vmaxq_f16 (FMAX) propagates NaN, matching the
// IEEE 754-201x `maximum` semantics documented above.
template <>
Vectorized<c10::Half> inline maximum(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
  float16x8_t r0 = vmaxq_f16(a.get_low(), b.get_low());
  float16x8_t r1 = vmaxq_f16(a.get_high(), b.get_high());
  return Vectorized<c10::Half>(r0, r1);
}
|
| 665 |
+
|
| 666 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
| 667 |
+
// either input is a NaN.
|
| 668 |
+
// Lane-wise minimum. vminq_f16 (FMIN) propagates NaN, matching the
// IEEE 754-201x `minimum` semantics documented above.
template <>
Vectorized<c10::Half> inline minimum(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b) {
  float16x8_t r0 = vminq_f16(a.get_low(), b.get_low());
  float16x8_t r1 = vminq_f16(a.get_high(), b.get_high());
  return Vectorized<c10::Half>(r0, r1);
}
|
| 676 |
+
|
| 677 |
+
// Lane-wise clamp of a into [min, max], built from the NaN-propagating
// minimum/maximum above (a NaN in any input yields NaN in that lane).
template <>
Vectorized<c10::Half> inline clamp(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& min,
    const Vectorized<c10::Half>& max) {
  return minimum(max, maximum(min, a));
}
|
| 684 |
+
|
| 685 |
+
// Lane-wise upper clamp: min(a, max), NaN-propagating.
template <>
Vectorized<c10::Half> inline clamp_max(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& max) {
  return minimum(max, a);
}
|
| 691 |
+
|
| 692 |
+
// Lane-wise lower clamp: max(a, min), NaN-propagating.
template <>
Vectorized<c10::Half> inline clamp_min(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& min) {
  return maximum(min, a);
}
|
| 698 |
+
|
| 699 |
+
template <>
|
| 700 |
+
Vectorized<c10::Half> inline operator&(
|
| 701 |
+
const Vectorized<c10::Half>& a,
|
| 702 |
+
const Vectorized<c10::Half>& b) {
|
| 703 |
+
float16x8_t r0 = vreinterpretq_f16_u16(vandq_u16(
|
| 704 |
+
vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low())));
|
| 705 |
+
float16x8_t r1 = vreinterpretq_f16_u16(vandq_u16(
|
| 706 |
+
vreinterpretq_u16_f16(a.get_high()),
|
| 707 |
+
vreinterpretq_u16_f16(b.get_high())));
|
| 708 |
+
return Vectorized<c10::Half>(r0, r1);
|
| 709 |
+
}
|
| 710 |
+
|
| 711 |
+
template <>
|
| 712 |
+
Vectorized<c10::Half> inline operator|(
|
| 713 |
+
const Vectorized<c10::Half>& a,
|
| 714 |
+
const Vectorized<c10::Half>& b) {
|
| 715 |
+
float16x8_t r0 = vreinterpretq_f16_u16(vorrq_u16(
|
| 716 |
+
vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low())));
|
| 717 |
+
float16x8_t r1 = vreinterpretq_f16_u16(vorrq_u16(
|
| 718 |
+
vreinterpretq_u16_f16(a.get_high()),
|
| 719 |
+
vreinterpretq_u16_f16(b.get_high())));
|
| 720 |
+
return Vectorized<c10::Half>(r0, r1);
|
| 721 |
+
}
|
| 722 |
+
|
| 723 |
+
template <>
|
| 724 |
+
Vectorized<c10::Half> inline operator^(
|
| 725 |
+
const Vectorized<c10::Half>& a,
|
| 726 |
+
const Vectorized<c10::Half>& b) {
|
| 727 |
+
float16x8_t r0 = vreinterpretq_f16_u16(veorq_u16(
|
| 728 |
+
vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low())));
|
| 729 |
+
float16x8_t r1 = vreinterpretq_f16_u16(veorq_u16(
|
| 730 |
+
vreinterpretq_u16_f16(a.get_high()),
|
| 731 |
+
vreinterpretq_u16_f16(b.get_high())));
|
| 732 |
+
return Vectorized<c10::Half>(r0, r1);
|
| 733 |
+
}
|
| 734 |
+
|
| 735 |
+
// Numeric equality: converts the all-ones comparison mask into 0/1 half
// values by masking against a broadcast 1.
inline Vectorized<c10::Half> Vectorized<c10::Half>::eq(
    const Vectorized<c10::Half>& other) const {
  return (*this == other) & Vectorized<c10::Half>(1);
}
|
| 739 |
+
|
| 740 |
+
// Numeric inequality: 0/1-valued variant of operator!=.
inline Vectorized<c10::Half> Vectorized<c10::Half>::ne(
    const Vectorized<c10::Half>& other) const {
  return (*this != other) & Vectorized<c10::Half>(1);
}
|
| 744 |
+
|
| 745 |
+
// Numeric greater-than: 0/1-valued variant of operator>.
inline Vectorized<c10::Half> Vectorized<c10::Half>::gt(
    const Vectorized<c10::Half>& other) const {
  return (*this > other) & Vectorized<c10::Half>(1);
}
|
| 749 |
+
|
| 750 |
+
// Numeric greater-or-equal: 0/1-valued variant of operator>=.
inline Vectorized<c10::Half> Vectorized<c10::Half>::ge(
    const Vectorized<c10::Half>& other) const {
  return (*this >= other) & Vectorized<c10::Half>(1);
}
|
| 754 |
+
|
| 755 |
+
// Numeric less-than: 0/1-valued variant of operator<.
inline Vectorized<c10::Half> Vectorized<c10::Half>::lt(
    const Vectorized<c10::Half>& other) const {
  return (*this < other) & Vectorized<c10::Half>(1);
}
|
| 759 |
+
|
| 760 |
+
// Numeric less-or-equal: 0/1-valued variant of operator<=.
inline Vectorized<c10::Half> Vectorized<c10::Half>::le(
    const Vectorized<c10::Half>& other) const {
  return (*this <= other) & Vectorized<c10::Half>(1);
}
|
| 764 |
+
|
| 765 |
+
// Converts n float16 values to int16 (vcvtq_s16_f16 truncates toward zero).
// The main loop handles Vectorized<Half>::size() (two 8-lane registers)
// elements per iteration; the scalar loop finishes the tail.
template <>
inline void convert(const float16_t* src, int16_t* dst, int64_t n) {
  int64_t i;
#pragma unroll
  for (i = 0; i <= (n - Vectorized<c10::Half>::size());
      i += Vectorized<c10::Half>::size()) {
    vst1q_s16(dst + i, vcvtq_s16_f16(vld1q_f16(src + i)));
    vst1q_s16(dst + i + 8, vcvtq_s16_f16(vld1q_f16(src + i + 8)));
  }
#pragma unroll
  for (; i < n; i++) {
    dst[i] = static_cast<int16_t>(src[i]);
  }
}
|
| 779 |
+
|
| 780 |
+
// Converts n int16 values to float16 (vcvtq_f16_s16). Mirror of the
// float16 -> int16 convert above: vectorized main loop plus scalar tail.
template <>
inline void convert(const int16_t* src, float16_t* dst, int64_t n) {
  int64_t i;
#pragma unroll
  for (i = 0; i <= (n - Vectorized<c10::Half>::size());
      i += Vectorized<c10::Half>::size()) {
    vst1q_f16(dst + i, vcvtq_f16_s16(vld1q_s16(src + i)));
    vst1q_f16(dst + i + 8, vcvtq_f16_s16(vld1q_s16(src + i + 8)));
  }
#pragma unroll
  for (; i < n; i++) {
    dst[i] = static_cast<float16_t>(src[i]);
  }
}
|
| 794 |
+
|
| 795 |
+
// Fused multiply-add: a * b + c per lane, in a single rounding step
// (vfmaq_f16 computes acc + x*y with acc as the first operand).
template <>
Vectorized<c10::Half> inline fmadd(
    const Vectorized<c10::Half>& a,
    const Vectorized<c10::Half>& b,
    const Vectorized<c10::Half>& c) {
  float16x8_t r0 = vfmaq_f16(c.get_low(), a.get_low(), b.get_low());
  float16x8_t r1 = vfmaq_f16(c.get_high(), a.get_high(), b.get_high());
  return Vectorized<c10::Half>(r0, r1);
}
|
| 804 |
+
|
| 805 |
+
template <>
|
| 806 |
+
Vectorized<c10::Half> inline fmsub(
|
| 807 |
+
const Vectorized<c10::Half>& a,
|
| 808 |
+
const Vectorized<c10::Half>& b,
|
| 809 |
+
const Vectorized<c10::Half>& c) {
|
| 810 |
+
float16x8_t r0 = vfmsq_f16(c.get_low(), a.get_low(), b.get_low());
|
| 811 |
+
float16x8_t r1 = vfmsq_f16(c.get_high(), a.get_high(), b.get_high());
|
| 812 |
+
return Vectorized<c10::Half>(r0, r1);
|
| 813 |
+
}
|
| 814 |
+
|
| 815 |
+
#endif /* defined(aarch64) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(C10_MOBILE) */
|
| 816 |
+
|
| 817 |
+
} // namespace CPU_CAPABILITY
|
| 818 |
+
} // namespace at::vec
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h
ADDED
|
@@ -0,0 +1,1586 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/macros/Macros.h>
|
| 9 |
+
#include <c10/util/irange.h>
|
| 10 |
+
|
| 11 |
+
namespace at::vec {
|
| 12 |
+
inline namespace CPU_CAPABILITY {
|
| 13 |
+
|
| 14 |
+
#ifdef CPU_CAPABILITY_AVX2
|
| 15 |
+
|
| 16 |
+
// Common base for the AVX2 integer Vectorized<> specializations: owns the
// raw 256-bit register and provides the shared bitwise-NOT helper.
struct Vectorizedi {
protected:
  __m256i values;

  // Bitwise NOT: XOR against an all-ones register (AVX2 has no NOT insn).
  static inline __m256i invert(const __m256i& v) {
    const auto ones = _mm256_set1_epi64x(-1);
    return _mm256_xor_si256(ones, v);
  }
public:
  Vectorizedi() {}
  Vectorizedi(__m256i v) : values(v) {}
  // Implicit conversion so Vectorized<intN> can be passed straight to
  // _mm256_* intrinsics.
  operator __m256i() const {
    return values;
  }
};
|
| 31 |
+
|
| 32 |
+
#else
|
| 33 |
+
|
| 34 |
+
struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined
|
| 35 |
+
|
| 36 |
+
#endif // CPU_CAPABILITY_AVX2
|
| 37 |
+
|
| 38 |
+
#ifdef CPU_CAPABILITY_AVX2
|
| 39 |
+
|
| 40 |
+
// AVX2 vector of four 64-bit signed integers in one __m256i register.
template <>
class Vectorized<int64_t> : public Vectorizedi {
private:
  // Defined out of line — see Note [Do not compile initializers with AVX].
  static const Vectorized<int64_t> ones;
public:
  using value_type = int64_t;
  using size_type = int;
  // Number of int64_t lanes in a 256-bit register.
  static constexpr size_type size() {
    return 4;
  }
  using Vectorizedi::Vectorizedi;
  Vectorized() {}
  // Broadcast one scalar into all four lanes.
  Vectorized(int64_t v) { values = _mm256_set1_epi64x(v); }
  Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4) {
    values = _mm256_setr_epi64x(val1, val2, val3, val4);
  }
  // Compile-time per-lane select: bit k of `mask` picks lane k from b,
  // otherwise from a. Goes through a stack buffer because AVX2 has no
  // 64-bit immediate blend in the integer domain.
  template <int64_t mask>
  static Vectorized<int64_t> blend(Vectorized<int64_t> a, Vectorized<int64_t> b) {
    __at_align__ int64_t tmp_values[size()];
    a.store(tmp_values);
    if (mask & 0x01)
      tmp_values[0] = _mm256_extract_epi64(b.values, 0);
    if (mask & 0x02)
      tmp_values[1] = _mm256_extract_epi64(b.values, 1);
    if (mask & 0x04)
      tmp_values[2] = _mm256_extract_epi64(b.values, 2);
    if (mask & 0x08)
      tmp_values[3] = _mm256_extract_epi64(b.values, 3);
    return loadu(tmp_values);
  }
  // Runtime select: the high bit of each byte of `mask` chooses b over a
  // (_mm256_blendv_epi8 operates byte-wise, which works for whole-lane
  // all-ones/all-zeros masks).
  static Vectorized<int64_t> blendv(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b,
                                const Vectorized<int64_t>& mask) {
    return _mm256_blendv_epi8(a.values, b.values, mask.values);
  }
  // {base, base+step, base+2*step, base+3*step}
  template <typename step_t>
  static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
    return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
  }
  // First `count` lanes from b, remaining lanes from a.
  static Vectorized<int64_t>
  set(Vectorized<int64_t> a, Vectorized<int64_t> b, int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
    }
    return b;
  }
  // Unaligned full-register load.
  static Vectorized<int64_t> loadu(const void* ptr) {
    return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
  }
  // Partial load of `count` elements; unread lanes are zeroed.
  static Vectorized<int64_t> loadu(const void* ptr, int64_t count) {
    __at_align__ int64_t tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(tmp_values, ptr, count * sizeof(int64_t));
    return loadu(tmp_values);
  }
  // Store `count` lanes: full-register fast path, else bounce through a
  // stack buffer so no bytes past `count` are written.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      // ptr need not to be aligned here. See
      // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
    } else if (count > 0) {
      __at_align__ int64_t tmp_values[size()];
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(int64_t));
    }
  }
  // Direct lane access is intentionally disabled; use store() instead.
  const int64_t& operator[](int idx) const = delete;
  int64_t& operator[](int idx) = delete;
  // |x| via two's complement: lanes that are negative get XORed with the
  // all-ones mask then incremented (subtracting -1). NOTE(review): INT64_MIN
  // wraps to itself under this scheme, as with std::abs.
  Vectorized<int64_t> abs() const {
    auto zero = _mm256_set1_epi64x(0);
    auto is_larger = _mm256_cmpgt_epi64(zero, values);  // all-ones where lane < 0
    auto inverse = _mm256_xor_si256(values, is_larger);
    return _mm256_sub_epi64(inverse, is_larger);
  }
  // Complex-protocol helpers: integers are their own real part.
  Vectorized<int64_t> real() const {
    return *this;
  }
  Vectorized<int64_t> imag() const {
    return _mm256_set1_epi64x(0);
  }
  Vectorized<int64_t> conj() const {
    return *this;
  }
  Vectorized<int64_t> neg() const;
  // Comparisons return per-lane all-ones/all-zeros masks. AVX2 only offers
  // cmpeq/cmpgt for 64-bit lanes, so <, <=, >= are derived by swapping
  // operands and/or inverting the mask.
  Vectorized<int64_t> operator==(const Vectorized<int64_t>& other) const {
    return _mm256_cmpeq_epi64(values, other.values);
  }
  Vectorized<int64_t> operator!=(const Vectorized<int64_t>& other) const {
    return invert(_mm256_cmpeq_epi64(values, other.values));
  }
  Vectorized<int64_t> operator<(const Vectorized<int64_t>& other) const {
    return _mm256_cmpgt_epi64(other.values, values);
  }
  Vectorized<int64_t> operator<=(const Vectorized<int64_t>& other) const {
    return invert(_mm256_cmpgt_epi64(values, other.values));
  }
  Vectorized<int64_t> operator>(const Vectorized<int64_t>& other) const {
    return _mm256_cmpgt_epi64(values, other.values);
  }
  Vectorized<int64_t> operator>=(const Vectorized<int64_t>& other) const {
    return invert(_mm256_cmpgt_epi64(other.values, values));
  }

  // 0/1-valued comparison helpers; declared here, defined out of line.
  Vectorized<int64_t> eq(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> ne(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> gt(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> ge(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> lt(const Vectorized<int64_t>& other) const;
  Vectorized<int64_t> le(const Vectorized<int64_t>& other) const;
};
|
| 161 |
+
|
| 162 |
+
template <>
|
| 163 |
+
class Vectorized<int32_t> : public Vectorizedi {
|
| 164 |
+
private:
|
| 165 |
+
  // All-ones constant used by the out-of-line eq/ne/gt/ge/lt/le helpers.
  static const Vectorized<int32_t> ones;
 public:
  using value_type = int32_t;
  // Number of int32_t lanes in a 256-bit register.
  static constexpr int size() {
    return 8;
  }
  using Vectorizedi::Vectorizedi;
  Vectorized() {}
  // Broadcast a single scalar to all 8 lanes.
  Vectorized(int32_t v) { values = _mm256_set1_epi32(v); }
  // Set the 8 lanes explicitly, val1 in lane 0 through val8 in lane 7.
  Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4,
         int32_t val5, int32_t val6, int32_t val7, int32_t val8) {
    values = _mm256_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8);
  }
  // Compile-time blend: lane i is taken from b when bit i of `mask` is set,
  // otherwise from a.
  template <int64_t mask>
  static Vectorized<int32_t> blend(Vectorized<int32_t> a, Vectorized<int32_t> b) {
    return _mm256_blend_epi32(a, b, mask);
  }
  // Runtime blend: byte-wise select driven by the high bit of each mask byte.
  static Vectorized<int32_t> blendv(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b,
                const Vectorized<int32_t>& mask) {
    return _mm256_blendv_epi8(a.values, b.values, mask.values);
  }
  // Arithmetic sequence: lane i = base + i * step.
  template <typename step_t>
  static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
    return Vectorized<int32_t>(
      base, base + step, base + 2 * step, base + 3 * step,
      base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
  }
  // First `count` lanes from b, remaining lanes from a.
  static Vectorized<int32_t>
  set(Vectorized<int32_t> a, Vectorized<int32_t> b, int32_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
      case 4:
        return blend<15>(a, b);
      case 5:
        return blend<31>(a, b);
      case 6:
        return blend<63>(a, b);
      case 7:
        return blend<127>(a, b);
    }
    return b;
  }
  // Unaligned full-vector load.
  static Vectorized<int32_t> loadu(const void* ptr) {
    return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
  }
  // Partial load: first `count` elements from ptr, remaining lanes zeroed.
  static Vectorized<int32_t> loadu(const void* ptr, int32_t count) {
    __at_align__ int32_t tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(tmp_values, ptr, count * sizeof(int32_t));
    return loadu(tmp_values);
  }
  // Store all lanes (count == size()) or only the first `count` lanes.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      // ptr need not to be aligned here. See
      // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
    } else if (count > 0) {
      // Partial store: bounce through an aligned temporary.
      __at_align__ int32_t tmp_values[size()];
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(int32_t));
    }
  }
  // Element access is deliberately disabled; use store()/loadu() instead.
  const int32_t& operator[](int idx) const = delete;
  int32_t& operator[](int idx) = delete;
  Vectorized<int32_t> abs() const {
    return _mm256_abs_epi32(values);
  }
  // real/imag/conj exist so integer vectors satisfy the complex-style API:
  // real part is the value itself, imaginary part is zero.
  Vectorized<int32_t> real() const {
    return *this;
  }
  Vectorized<int32_t> imag() const {
    return _mm256_set1_epi32(0);
  }
  Vectorized<int32_t> conj() const {
    return *this;
  }
  Vectorized<int32_t> neg() const;
  // Comparisons return a per-lane mask (all-ones when true, zero when false).
  Vectorized<int32_t> operator==(const Vectorized<int32_t>& other) const {
    return _mm256_cmpeq_epi32(values, other.values);
  }
  Vectorized<int32_t> operator!=(const Vectorized<int32_t>& other) const {
    return invert(_mm256_cmpeq_epi32(values, other.values));
  }
  // a < b is computed as b > a since AVX2 only provides signed cmpgt.
  Vectorized<int32_t> operator<(const Vectorized<int32_t>& other) const {
    return _mm256_cmpgt_epi32(other.values, values);
  }
  Vectorized<int32_t> operator<=(const Vectorized<int32_t>& other) const {
    return invert(_mm256_cmpgt_epi32(values, other.values));
  }
  Vectorized<int32_t> operator>(const Vectorized<int32_t>& other) const {
    return _mm256_cmpgt_epi32(values, other.values);
  }
  Vectorized<int32_t> operator>=(const Vectorized<int32_t>& other) const {
    return invert(_mm256_cmpgt_epi32(other.values, values));
  }
  // 0/1-valued comparison helpers (defined out of line).
  Vectorized<int32_t> eq(const Vectorized<int32_t>& other) const;
  Vectorized<int32_t> ne(const Vectorized<int32_t>& other) const;
  Vectorized<int32_t> gt(const Vectorized<int32_t>& other) const;
  Vectorized<int32_t> ge(const Vectorized<int32_t>& other) const;
  Vectorized<int32_t> lt(const Vectorized<int32_t>& other) const;
  Vectorized<int32_t> le(const Vectorized<int32_t>& other) const;
};
|
| 279 |
+
|
| 280 |
+
// Bulk int32 -> float conversion: vectorized main loop converts 8 elements
// per iteration with cvtepi32_ps; a scalar tail handles the remainder
// (and the entire input when n < 8).
template <>
inline void convert(const int32_t *src, float *dst, int64_t n) {
  int64_t i;
  // int32_t and float have same size
#ifndef _MSC_VER
# pragma unroll
#endif
  for (i = 0; i <= (n - Vectorized<int32_t>::size()); i += Vectorized<int32_t>::size()) {
    auto input_vec = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i));
    auto output_vec = _mm256_cvtepi32_ps(input_vec);
    _mm256_storeu_ps(reinterpret_cast<float*>(dst + i), output_vec);
  }
#ifndef _MSC_VER
# pragma unroll
#endif
  // Scalar tail for the last n % 8 elements.
  for (; i < n; i++) {
    dst[i] = static_cast<float>(src[i]);
  }
}
|
| 299 |
+
|
| 300 |
+
// Bulk int32 -> double conversion. Each iteration loads 128 bits (4 x int32)
// and widens to 256 bits (4 x double), so the loop steps by the double
// vector width; a scalar tail handles the remainder.
template <>
inline void convert(const int32_t *src, double *dst, int64_t n) {
  int64_t i;
  // int32_t has half the size of double
#ifndef _MSC_VER
# pragma unroll
#endif
  for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
    auto input_128_vec = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src + i));
    auto output_vec = _mm256_cvtepi32_pd(input_128_vec);
    _mm256_storeu_pd(reinterpret_cast<double*>(dst + i), output_vec);
  }
#ifndef _MSC_VER
# pragma unroll
#endif
  // Scalar tail for the remaining elements.
  for (; i < n; i++) {
    dst[i] = static_cast<double>(src[i]);
  }
}
|
| 319 |
+
|
| 320 |
+
// AVX2 vector of 16 x int16_t.
template <>
class Vectorized<int16_t> : public Vectorizedi {
private:
  // All-ones constant used by the out-of-line eq/ne/gt/ge/lt/le helpers.
  static const Vectorized<int16_t> ones;
public:
  using value_type = int16_t;
  // Number of int16_t lanes in a 256-bit register.
  static constexpr int size() {
    return 16;
  }
  using Vectorizedi::Vectorizedi;
  Vectorized() {}
  // Broadcast a single scalar to all 16 lanes.
  Vectorized(int16_t v) { values = _mm256_set1_epi16(v); }
  // Set the 16 lanes explicitly, val1 in lane 0 through val16 in lane 15.
  Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4,
         int16_t val5, int16_t val6, int16_t val7, int16_t val8,
         int16_t val9, int16_t val10, int16_t val11, int16_t val12,
         int16_t val13, int16_t val14, int16_t val15, int16_t val16) {
    values = _mm256_setr_epi16(val1, val2, val3, val4, val5, val6, val7, val8,
                               val9, val10, val11, val12, val13, val14, val15, val16);
  }
  // Compile-time blend: lane i is taken from b when bit i of `mask` is set.
  // Implemented through an aligned temporary plus per-lane extracts; the
  // unselected lanes keep a's values.
  template <int64_t mask>
  static Vectorized<int16_t> blend(Vectorized<int16_t> a, Vectorized<int16_t> b) {
    __at_align__ int16_t tmp_values[size()];
    a.store(tmp_values);
    if (mask & 0x01)
      tmp_values[0] = _mm256_extract_epi16(b.values, 0);
    if (mask & 0x02)
      tmp_values[1] = _mm256_extract_epi16(b.values, 1);
    if (mask & 0x04)
      tmp_values[2] = _mm256_extract_epi16(b.values, 2);
    if (mask & 0x08)
      tmp_values[3] = _mm256_extract_epi16(b.values, 3);
    if (mask & 0x10)
      tmp_values[4] = _mm256_extract_epi16(b.values, 4);
    if (mask & 0x20)
      tmp_values[5] = _mm256_extract_epi16(b.values, 5);
    if (mask & 0x40)
      tmp_values[6] = _mm256_extract_epi16(b.values, 6);
    if (mask & 0x80)
      tmp_values[7] = _mm256_extract_epi16(b.values, 7);
    if (mask & 0x100)
      tmp_values[8] = _mm256_extract_epi16(b.values, 8);
    if (mask & 0x200)
      tmp_values[9] = _mm256_extract_epi16(b.values, 9);
    if (mask & 0x400)
      tmp_values[10] = _mm256_extract_epi16(b.values, 10);
    if (mask & 0x800)
      tmp_values[11] = _mm256_extract_epi16(b.values, 11);
    if (mask & 0x1000)
      tmp_values[12] = _mm256_extract_epi16(b.values, 12);
    if (mask & 0x2000)
      tmp_values[13] = _mm256_extract_epi16(b.values, 13);
    if (mask & 0x4000)
      tmp_values[14] = _mm256_extract_epi16(b.values, 14);
    if (mask & 0x8000)
      tmp_values[15] = _mm256_extract_epi16(b.values, 15);
    return loadu(tmp_values);
  }
  // Runtime blend: byte-wise select driven by the high bit of each mask byte.
  static Vectorized<int16_t> blendv(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b,
                const Vectorized<int16_t>& mask) {
    return _mm256_blendv_epi8(a.values, b.values, mask.values);
  }
  // Arithmetic sequence: lane i = base + i * step.
  template <typename step_t>
  static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
    return Vectorized<int16_t>(
      base, base + step, base + 2 * step, base + 3 * step,
      base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
      base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
      base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
  }
  // First `count` lanes from b, remaining lanes from a.
  static Vectorized<int16_t>
  set(Vectorized<int16_t> a, Vectorized<int16_t> b, int16_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
      case 4:
        return blend<15>(a, b);
      case 5:
        return blend<31>(a, b);
      case 6:
        return blend<63>(a, b);
      case 7:
        return blend<127>(a, b);
      case 8:
        return blend<255>(a, b);
      case 9:
        return blend<511>(a, b);
      case 10:
        return blend<1023>(a, b);
      case 11:
        return blend<2047>(a, b);
      case 12:
        return blend<4095>(a, b);
      case 13:
        return blend<8191>(a, b);
      case 14:
        return blend<16383>(a, b);
      case 15:
        return blend<32767>(a, b);
    }
    return b;
  }
  // Unaligned full-vector load.
  static Vectorized<int16_t> loadu(const void* ptr) {
    return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
  }
  // Partial load: first `count` elements from ptr, remaining lanes zeroed.
  static Vectorized<int16_t> loadu(const void* ptr, int16_t count) {
    __at_align__ int16_t tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
    return loadu(tmp_values);
  }
  // Store all lanes (count == size()) or only the first `count` lanes.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      // ptr need not to be aligned here. See
      // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
    } else if (count > 0) {
      // Partial store: bounce through an aligned temporary.
      __at_align__ int16_t tmp_values[size()];
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
    }
  }
  // Element access is deliberately disabled; use store()/loadu() instead.
  const int16_t& operator[](int idx) const = delete;
  int16_t& operator[](int idx) = delete;
  Vectorized<int16_t> abs() const {
    return _mm256_abs_epi16(values);
  }
  // real/imag/conj satisfy the complex-style API for integer vectors.
  Vectorized<int16_t> real() const {
    return *this;
  }
  Vectorized<int16_t> imag() const {
    return _mm256_set1_epi16(0);
  }
  Vectorized<int16_t> conj() const {
    return *this;
  }
  Vectorized<int16_t> neg() const;
  // Comparisons return a per-lane mask (all-ones when true, zero when false).
  Vectorized<int16_t> operator==(const Vectorized<int16_t>& other) const {
    return _mm256_cmpeq_epi16(values, other.values);
  }
  Vectorized<int16_t> operator!=(const Vectorized<int16_t>& other) const {
    return invert(_mm256_cmpeq_epi16(values, other.values));
  }
  // a < b is computed as b > a since AVX2 only provides signed cmpgt.
  Vectorized<int16_t> operator<(const Vectorized<int16_t>& other) const {
    return _mm256_cmpgt_epi16(other.values, values);
  }
  Vectorized<int16_t> operator<=(const Vectorized<int16_t>& other) const {
    return invert(_mm256_cmpgt_epi16(values, other.values));
  }
  Vectorized<int16_t> operator>(const Vectorized<int16_t>& other) const {
    return _mm256_cmpgt_epi16(values, other.values);
  }
  Vectorized<int16_t> operator>=(const Vectorized<int16_t>& other) const {
    return invert(_mm256_cmpgt_epi16(other.values, values));
  }

  // 0/1-valued comparison helpers (defined out of line).
  Vectorized<int16_t> eq(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> ne(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> gt(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> ge(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> lt(const Vectorized<int16_t>& other) const;
  Vectorized<int16_t> le(const Vectorized<int16_t>& other) const;
};
|
| 493 |
+
|
| 494 |
+
// Common base for the 32-lane 8-bit vectors (int8_t and uint8_t).
// Holds everything that is sign-agnostic; the signed/unsigned subclasses
// add abs(), neg() and the comparison operators.
template <typename T>
class Vectorized8 : public Vectorizedi {
  static_assert(
    std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>,
    "Only int8_t/uint8_t are supported");
protected:
  // All-ones constant used by the out-of-line comparison helpers.
  static const Vectorized<T> ones;
public:
  using value_type = T;
  // Number of 8-bit lanes in a 256-bit register.
  static constexpr int size() {
    return 32;
  }
  using Vectorizedi::Vectorizedi;
  Vectorized8() {}
  // Broadcast a single scalar to all 32 lanes.
  Vectorized8(T v) { values = _mm256_set1_epi8(v); }
  // Set the 32 lanes explicitly, val1 in lane 0 through val32 in lane 31.
  Vectorized8(T val1, T val2, T val3, T val4,
         T val5, T val6, T val7, T val8,
         T val9, T val10, T val11, T val12,
         T val13, T val14, T val15, T val16,
         T val17, T val18, T val19, T val20,
         T val21, T val22, T val23, T val24,
         T val25, T val26, T val27, T val28,
         T val29, T val30, T val31, T val32) {
    values = _mm256_setr_epi8(val1, val2, val3, val4, val5, val6, val7, val8,
                              val9, val10, val11, val12, val13, val14, val15, val16,
                              val17, val18, val19, val20, val21, val22, val23, val24,
                              val25, val26, val27, val28, val29, val30, val31, val32);
  }
  // Compile-time blend: lane i is taken from b when bit i of `mask` is set.
  // Implemented through an aligned temporary plus per-lane extracts; the
  // unselected lanes keep a's values.
  template <int64_t mask>
  static Vectorized<T> blend(Vectorized<T> a, Vectorized<T> b) {
    __at_align__ T tmp_values[size()];
    a.store(tmp_values);
    if (mask & 0x01)
      tmp_values[0] = _mm256_extract_epi8(b.values, 0);
    if (mask & 0x02)
      tmp_values[1] = _mm256_extract_epi8(b.values, 1);
    if (mask & 0x04)
      tmp_values[2] = _mm256_extract_epi8(b.values, 2);
    if (mask & 0x08)
      tmp_values[3] = _mm256_extract_epi8(b.values, 3);
    if (mask & 0x10)
      tmp_values[4] = _mm256_extract_epi8(b.values, 4);
    if (mask & 0x20)
      tmp_values[5] = _mm256_extract_epi8(b.values, 5);
    if (mask & 0x40)
      tmp_values[6] = _mm256_extract_epi8(b.values, 6);
    if (mask & 0x80)
      tmp_values[7] = _mm256_extract_epi8(b.values, 7);
    if (mask & 0x100)
      tmp_values[8] = _mm256_extract_epi8(b.values, 8);
    if (mask & 0x200)
      tmp_values[9] = _mm256_extract_epi8(b.values, 9);
    if (mask & 0x400)
      tmp_values[10] = _mm256_extract_epi8(b.values, 10);
    if (mask & 0x800)
      tmp_values[11] = _mm256_extract_epi8(b.values, 11);
    if (mask & 0x1000)
      tmp_values[12] = _mm256_extract_epi8(b.values, 12);
    if (mask & 0x2000)
      tmp_values[13] = _mm256_extract_epi8(b.values, 13);
    if (mask & 0x4000)
      tmp_values[14] = _mm256_extract_epi8(b.values, 14);
    if (mask & 0x8000)
      tmp_values[15] = _mm256_extract_epi8(b.values, 15);
    if (mask & 0x010000)
      tmp_values[16] = _mm256_extract_epi8(b.values, 16);
    if (mask & 0x020000)
      tmp_values[17] = _mm256_extract_epi8(b.values, 17);
    if (mask & 0x040000)
      tmp_values[18] = _mm256_extract_epi8(b.values, 18);
    if (mask & 0x080000)
      tmp_values[19] = _mm256_extract_epi8(b.values, 19);
    if (mask & 0x100000)
      tmp_values[20] = _mm256_extract_epi8(b.values, 20);
    if (mask & 0x200000)
      tmp_values[21] = _mm256_extract_epi8(b.values, 21);
    if (mask & 0x400000)
      tmp_values[22] = _mm256_extract_epi8(b.values, 22);
    if (mask & 0x800000)
      tmp_values[23] = _mm256_extract_epi8(b.values, 23);
    if (mask & 0x1000000)
      tmp_values[24] = _mm256_extract_epi8(b.values, 24);
    if (mask & 0x2000000)
      tmp_values[25] = _mm256_extract_epi8(b.values, 25);
    if (mask & 0x4000000)
      tmp_values[26] = _mm256_extract_epi8(b.values, 26);
    if (mask & 0x8000000)
      tmp_values[27] = _mm256_extract_epi8(b.values, 27);
    if (mask & 0x10000000)
      tmp_values[28] = _mm256_extract_epi8(b.values, 28);
    if (mask & 0x20000000)
      tmp_values[29] = _mm256_extract_epi8(b.values, 29);
    if (mask & 0x40000000)
      tmp_values[30] = _mm256_extract_epi8(b.values, 30);
    if (mask & 0x80000000)
      tmp_values[31] = _mm256_extract_epi8(b.values, 31);
    return loadu(tmp_values);
  }
  // Runtime blend: byte-wise select driven by the high bit of each mask byte.
  static Vectorized<T> blendv(const Vectorized<T>& a, const Vectorized<T>& b,
               const Vectorized<T>& mask) {
    return _mm256_blendv_epi8(a.values, b.values, mask.values);
  }
  // Arithmetic sequence: lane i = base + i * step.
  template <typename step_t>
  static Vectorized<T> arange(T base = 0, step_t step = static_cast<step_t>(1)) {
    return Vectorized<T>(
      base, base + step, base + 2 * step, base + 3 * step,
      base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
      base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
      base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
      base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
      base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
      base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
      base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
  }
  // First `count` lanes from b, remaining lanes from a.
  static Vectorized<T>
  set(Vectorized<T> a, Vectorized<T> b, T count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<0x1>(a, b);
      case 2:
        return blend<0x3>(a, b);
      case 3:
        return blend<0x7>(a, b);
      case 4:
        return blend<0xF>(a, b);
      case 5:
        return blend<0x1F>(a, b);
      case 6:
        return blend<0x3F>(a, b);
      case 7:
        return blend<0x7F>(a, b);
      case 8:
        return blend<0xFF>(a, b);
      case 9:
        return blend<0x1FF>(a, b);
      case 10:
        return blend<0x3FF>(a, b);
      case 11:
        return blend<0x7FF>(a, b);
      case 12:
        return blend<0xFFF>(a, b);
      case 13:
        return blend<0x1FFF>(a, b);
      case 14:
        return blend<0x3FFF>(a, b);
      case 15:
        return blend<0x7FFF>(a, b);
      case 16:
        return blend<0xFFFF>(a, b);
      case 17:
        return blend<0x1FFFF>(a, b);
      case 18:
        return blend<0x3FFFF>(a, b);
      case 19:
        return blend<0x7FFFF>(a, b);
      case 20:
        return blend<0xFFFFF>(a, b);
      case 21:
        return blend<0x1FFFFF>(a, b);
      case 22:
        return blend<0x3FFFFF>(a, b);
      case 23:
        return blend<0x7FFFFF>(a, b);
      case 24:
        return blend<0xFFFFFF>(a, b);
      case 25:
        return blend<0x1FFFFFF>(a, b);
      case 26:
        return blend<0x3FFFFFF>(a, b);
      case 27:
        return blend<0x7FFFFFF>(a, b);
      case 28:
        return blend<0xFFFFFFF>(a, b);
      case 29:
        return blend<0x1FFFFFFF>(a, b);
      case 30:
        return blend<0x3FFFFFFF>(a, b);
      case 31:
        return blend<0x7FFFFFFF>(a, b);
    }
    return b;
  }
  // Unaligned full-vector load.
  static Vectorized<T> loadu(const void* ptr) {
    return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
  }
  static Vectorized<T> loadu_one_fourth(const void* ptr) {
    // Fast path if only load element number of 8.
    // Note: We didn't merge it as fast path of loadu(const void* ptr, T count),
    // Because loadu(const void* ptr, T count) requires zero initialization for upper 128 bits.
    // However, by using _mm256_castsi128_si256, the upper 128 bits of the result are undefined.
    // TODO<leslie> We can use _mm256_zextsi128_si256 in the furture,
    // since gcc 9.3 doesn't support it now.
    __m128i input_128 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr));
    return _mm256_castsi128_si256(input_128);
  }
  // Partial load: first `count` elements from ptr, remaining lanes zeroed.
  static Vectorized<T> loadu(const void* ptr, T count) {
    __at_align__ T tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(tmp_values, ptr, count * sizeof(T));
    return loadu(tmp_values);
  }
  // Store all lanes (count == size()) or only the first `count` lanes.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      // ptr need not to be aligned here. See
      // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
    } else if (count > 0) {
      if (count == 8) {
        // Fast path if only store element number of 8
        _mm_storel_epi64(reinterpret_cast<__m128i*>(ptr), _mm256_castsi256_si128(values));
      } else {
        // Partial store: bounce through an aligned temporary.
        __at_align__ T tmp_values[size()];
        _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
        std::memcpy(ptr, tmp_values, count * sizeof(T));
      }
    }
  }
  // Element access is deliberately disabled; use store()/loadu() instead.
  const T& operator[](int idx) const = delete;
  T& operator[](int idx) = delete;
  // real/imag/conj satisfy the complex-style API for integer vectors.
  Vectorized<T> real() const {
    return *this;
  }
  Vectorized<T> imag() const {
    return _mm256_set1_epi8(0);
  }
  Vectorized<T> conj() const {
    return *this;
  }
};
|
| 730 |
+
|
| 731 |
+
// Signed 8-bit specialization: adds abs/neg and signed comparisons on top
// of the sign-agnostic Vectorized8 base.
template<>
class Vectorized<int8_t>: public Vectorized8<int8_t> {
public:
  using Vectorized8::Vectorized8;

  Vectorized<int8_t> neg() const;

  Vectorized<int8_t> abs() const {
    return _mm256_abs_epi8(values);
  }

  // Comparisons return a per-lane mask (all-ones when true, zero when false).
  Vectorized<int8_t> operator==(const Vectorized<int8_t>& other) const {
    return _mm256_cmpeq_epi8(values, other.values);
  }
  Vectorized<int8_t> operator!=(const Vectorized<int8_t>& other) const {
    return invert(_mm256_cmpeq_epi8(values, other.values));
  }
  // a < b is computed as b > a since AVX2 only provides signed cmpgt.
  Vectorized<int8_t> operator<(const Vectorized<int8_t>& other) const {
    return _mm256_cmpgt_epi8(other.values, values);
  }
  Vectorized<int8_t> operator<=(const Vectorized<int8_t>& other) const {
    return invert(_mm256_cmpgt_epi8(values, other.values));
  }
  // > and >= are expressed through the mirrored < and <= overloads.
  Vectorized<int8_t> operator>(const Vectorized<int8_t>& other) const {
    return other < *this;
  }
  Vectorized<int8_t> operator>=(const Vectorized<int8_t>& other) const {
    return other <= *this;
  }

  // 0/1-valued comparison helpers (defined out of line).
  Vectorized<int8_t> eq(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> ne(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> gt(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> ge(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> lt(const Vectorized<int8_t>& other) const;
  Vectorized<int8_t> le(const Vectorized<int8_t>& other) const;
};
|
| 768 |
+
|
| 769 |
+
// Unsigned 8-bit specialization. AVX2 has no unsigned compare-greater-than,
// so the ordering operators are built from _mm256_max_epu8 + equality.
template<>
class Vectorized<uint8_t>: public Vectorized8<uint8_t> {
public:
  using Vectorized8::Vectorized8;

  Vectorized<uint8_t> neg() const;

  // Unsigned values are their own absolute value.
  Vectorized<uint8_t> abs() const {
    return *this;
  }

  // Comparisons return a per-lane mask (all-ones when true, zero when false).
  Vectorized<uint8_t> operator==(const Vectorized<uint8_t>& other) const {
    return _mm256_cmpeq_epi8(values, other.values);
  }
  Vectorized<uint8_t> operator!=(const Vectorized<uint8_t>& other) const {
    return invert(_mm256_cmpeq_epi8(values, other.values));
  }
  // a < b  <=>  max(a, b) != a  (unsigned max trick).
  Vectorized<uint8_t> operator<(const Vectorized<uint8_t>& other) const {
    __m256i max = _mm256_max_epu8(values, other.values);
    return invert(_mm256_cmpeq_epi8(max, values));
  }
  // a <= b  <=>  max(a, b) == b.
  Vectorized<uint8_t> operator<=(const Vectorized<uint8_t>& other) const {
    __m256i max = _mm256_max_epu8(values, other.values);
    return _mm256_cmpeq_epi8(max, other.values);
  }
  // > and >= are expressed through the mirrored < and <= overloads.
  Vectorized<uint8_t> operator>(const Vectorized<uint8_t>& other) const {
    return other < *this;
  }
  Vectorized<uint8_t> operator>=(const Vectorized<uint8_t>& other) const {
    return other <= *this;
  }

  // 0/1-valued comparison helpers (defined out of line).
  Vectorized<uint8_t> eq(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> ne(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> gt(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> ge(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> lt(const Vectorized<uint8_t>& other) const;
  Vectorized<uint8_t> le(const Vectorized<uint8_t>& other) const;
};
|
| 808 |
+
|
| 809 |
+
// Lane-wise addition for each integer width (wrap-around on overflow,
// per the corresponding padd instruction).
template <>
Vectorized<int64_t> inline operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  return _mm256_add_epi64(a, b);
}

template <>
Vectorized<int32_t> inline operator+(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return _mm256_add_epi32(a, b);
}

template <>
Vectorized<int16_t> inline operator+(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return _mm256_add_epi16(a, b);
}

template <>
Vectorized<int8_t> inline operator+(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  return _mm256_add_epi8(a, b);
}

// Same instruction as int8_t: two's-complement addition is sign-agnostic.
template <>
Vectorized<uint8_t> inline operator+(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  return _mm256_add_epi8(a, b);
}
|
| 833 |
+
|
| 834 |
+
// Lane-wise subtraction for each integer width (wrap-around on overflow,
// per the corresponding psub instruction).
template <>
Vectorized<int64_t> inline operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  return _mm256_sub_epi64(a, b);
}

template <>
Vectorized<int32_t> inline operator-(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return _mm256_sub_epi32(a, b);
}

template <>
Vectorized<int16_t> inline operator-(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return _mm256_sub_epi16(a, b);
}

template <>
Vectorized<int8_t> inline operator-(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  return _mm256_sub_epi8(a, b);
}

// Same instruction as int8_t: two's-complement subtraction is sign-agnostic.
template <>
Vectorized<uint8_t> inline operator-(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  return _mm256_sub_epi8(a, b);
}
|
| 858 |
+
|
| 859 |
+
// Negation. Defined here so we can utilize operator-
// Each neg() is simply 0 - x, reusing the subtraction specializations above.
inline Vectorized<int64_t> Vectorized<int64_t>::neg() const {
  return Vectorized<int64_t>(0) - *this;
}

inline Vectorized<int32_t> Vectorized<int32_t>::neg() const {
  return Vectorized<int32_t>(0) - *this;
}

inline Vectorized<int16_t> Vectorized<int16_t>::neg() const {
  return Vectorized<int16_t>(0) - *this;
}

inline Vectorized<int8_t> Vectorized<int8_t>::neg() const {
  return Vectorized<int8_t>(0) - *this;
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::neg() const {
  return Vectorized<uint8_t>(0) - *this;
}
|
| 879 |
+
|
| 880 |
+
// Emulate operations with no native 64-bit support in avx,
|
| 881 |
+
// by extracting each element, performing the operation pointwise,
|
| 882 |
+
// then combining the results into a vector.
|
| 883 |
+
template <typename op_t>
|
| 884 |
+
Vectorized<int64_t> inline emulate(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const op_t& op) {
|
| 885 |
+
int64_t a0 = _mm256_extract_epi64(a, 0);
|
| 886 |
+
int64_t a1 = _mm256_extract_epi64(a, 1);
|
| 887 |
+
int64_t a2 = _mm256_extract_epi64(a, 2);
|
| 888 |
+
int64_t a3 = _mm256_extract_epi64(a, 3);
|
| 889 |
+
|
| 890 |
+
int64_t b0 = _mm256_extract_epi64(b, 0);
|
| 891 |
+
int64_t b1 = _mm256_extract_epi64(b, 1);
|
| 892 |
+
int64_t b2 = _mm256_extract_epi64(b, 2);
|
| 893 |
+
int64_t b3 = _mm256_extract_epi64(b, 3);
|
| 894 |
+
|
| 895 |
+
int64_t c0 = op(a0, b0);
|
| 896 |
+
int64_t c1 = op(a1, b1);
|
| 897 |
+
int64_t c2 = op(a2, b2);
|
| 898 |
+
int64_t c3 = op(a3, b3);
|
| 899 |
+
|
| 900 |
+
return _mm256_set_epi64x(c3, c2, c1, c0);
|
| 901 |
+
}
|
| 902 |
+
|
| 903 |
+
// Three-operand variant of the lane-wise int64_t emulation helper above
// (used e.g. for clamp, which needs value, lower bound, and upper bound).
template <typename op_t>
Vectorized<int64_t> inline emulate(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const Vectorized<int64_t>& c, const op_t& op) {
  // Extract all four 64-bit lanes of each of the three operands.
  int64_t a0 = _mm256_extract_epi64(a, 0);
  int64_t a1 = _mm256_extract_epi64(a, 1);
  int64_t a2 = _mm256_extract_epi64(a, 2);
  int64_t a3 = _mm256_extract_epi64(a, 3);

  int64_t b0 = _mm256_extract_epi64(b, 0);
  int64_t b1 = _mm256_extract_epi64(b, 1);
  int64_t b2 = _mm256_extract_epi64(b, 2);
  int64_t b3 = _mm256_extract_epi64(b, 3);

  int64_t c0 = _mm256_extract_epi64(c, 0);
  int64_t c1 = _mm256_extract_epi64(c, 1);
  int64_t c2 = _mm256_extract_epi64(c, 2);
  int64_t c3 = _mm256_extract_epi64(c, 3);

  // Apply the scalar three-argument op lane by lane.
  int64_t d0 = op(a0, b0, c0);
  int64_t d1 = op(a1, b1, c1);
  int64_t d2 = op(a2, b2, c2);
  int64_t d3 = op(a3, b3, c3);

  // set_epi64x takes arguments most-significant lane first.
  return _mm256_set_epi64x(d3, d2, d1, d0);
}
|
| 927 |
+
|
| 928 |
+
// AVX2 has no intrinsic for int64_t multiply so it needs to be emulated
// This could be implemented more efficiently using epi32 instructions
// This is also technically avx compatible, but then we'll need AVX
// code for add as well.
// Note: intentionally ignores undefined behavior like (-lowest * -1).
template <>
Vectorized<int64_t> inline operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  return emulate(a, b, [](int64_t a_point, int64_t b_point) __ubsan_ignore_undefined__ {return a_point * b_point;});
}

// 32-bit lanes: native low-half multiply.
template <>
Vectorized<int32_t> inline operator*(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return _mm256_mullo_epi32(a, b);
}

// 16-bit lanes: native low-half multiply.
template <>
Vectorized<int16_t> inline operator*(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return _mm256_mullo_epi16(a, b);
}
|
| 947 |
+
|
| 948 |
+
template <typename T, typename Op>
|
| 949 |
+
Vectorized<T> inline int_elementwise_binary_256(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
|
| 950 |
+
T values_a[Vectorized<T>::size()];
|
| 951 |
+
T values_b[Vectorized<T>::size()];
|
| 952 |
+
a.store(values_a);
|
| 953 |
+
b.store(values_b);
|
| 954 |
+
for (int i = 0; i != Vectorized<T>::size(); i++) {
|
| 955 |
+
values_a[i] = op(values_a[i], values_b[i]);
|
| 956 |
+
}
|
| 957 |
+
return Vectorized<T>::loadu(values_a);
|
| 958 |
+
}
|
| 959 |
+
|
| 960 |
+
template <>
Vectorized<int8_t> inline operator*(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  // We don't have an instruction for multiplying int8_t
#ifndef CPU_CAPABILITY_AVX2
  return int_elementwise_binary_256(a, b, std::multiplies<int8_t>());
#else
  // Strategy: treat the register as 16-bit lanes and multiply the even
  // (low) bytes and odd (high) bytes separately, then merge.
  __m256i mask00FF = _mm256_set1_epi16(0x00FF);
  // Sign-extend each low byte into a full 16-bit lane (shift left then
  // arithmetic shift right by 8).
  __m256i a_lo = _mm256_srai_epi16(_mm256_slli_epi16(a, 8), 8);
  __m256i b_lo = _mm256_srai_epi16(_mm256_slli_epi16(b, 8), 8);
  // Sign-extend each high byte into a 16-bit lane.
  __m256i a_hi = _mm256_srai_epi16(a, 8);
  __m256i b_hi = _mm256_srai_epi16(b, 8);
  // Low-byte products: keep only the low 8 bits of each 16-bit product.
  __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF);
  // High-byte products: shift the low 8 bits of each product back into
  // the high byte position.
  __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8);
  // Interleave the two halves back together.
  __m256i res = _mm256_or_si256(res_hi, res_lo);
  return res;
#endif
}
|
| 977 |
+
|
| 978 |
+
template <>
Vectorized<uint8_t> inline operator*(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  // We don't have an instruction for multiplying uint8_t
#ifndef CPU_CAPABILITY_AVX2
  return int_elementwise_binary_256(a, b, std::multiplies<uint8_t>());
#else
  // Same even/odd-byte split as the int8_t overload above, but with
  // zero-extension (mask / logical shift) since lanes are unsigned.
  __m256i mask00FF = _mm256_set1_epi16(0x00FF);
  // Zero-extend each low byte into a 16-bit lane.
  __m256i a_lo = _mm256_and_si256 (a, mask00FF);
  __m256i b_lo = _mm256_and_si256 (b, mask00FF);
  // Zero-extend each high byte into a 16-bit lane (logical shift).
  __m256i a_hi = _mm256_srli_epi16(a, 8);
  __m256i b_hi = _mm256_srli_epi16(b, 8);
  // Low-byte products, truncated to 8 bits.
  __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF);
  // High-byte products, shifted back into the high byte position.
  __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8);
  // Merge the two halves.
  __m256i res = _mm256_or_si256(res_hi, res_lo);
  return res;
#endif
}
|
| 995 |
+
|
| 996 |
+
// Lane-wise minimum. int64_t has no packed-min instruction, so it is
// either emulated lane by lane (pre-AVX2) or synthesized from a 64-bit
// compare plus a byte blend.
template <>
Vectorized<int64_t> inline minimum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
#ifndef CPU_CAPABILITY_AVX2
  return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::min(a_point, b_point);});
#else
  // cmp lanes are all-ones where a > b; blendv then picks b there, a elsewhere.
  __m256i cmp = _mm256_cmpgt_epi64(a, b);
  return _mm256_blendv_epi8(a, b, cmp);
#endif
}

// Narrower lane widths have dedicated packed-min instructions.
template <>
Vectorized<int32_t> inline minimum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return _mm256_min_epi32(a, b);
}

template <>
Vectorized<int16_t> inline minimum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return _mm256_min_epi16(a, b);
}

template <>
Vectorized<int8_t> inline minimum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  return _mm256_min_epi8(a, b);
}

// Unsigned compare for uint8_t lanes.
template <>
Vectorized<uint8_t> inline minimum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  return _mm256_min_epu8(a, b);
}
|
| 1025 |
+
|
| 1026 |
+
// Lane-wise maximum. Mirrors minimum above: int64_t uses compare+blend
// (or per-lane emulation pre-AVX2), narrower lanes use native max ops.
template <>
Vectorized<int64_t> inline maximum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
#ifndef CPU_CAPABILITY_AVX2
  return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::max(a_point, b_point);});
#else
  // cmp lanes are all-ones where a > b; blendv picks a there, b elsewhere.
  __m256i cmp = _mm256_cmpgt_epi64(a, b);
  return _mm256_blendv_epi8(b, a, cmp);
#endif
}

template <>
Vectorized<int32_t> inline maximum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return _mm256_max_epi32(a, b);
}

template <>
Vectorized<int16_t> inline maximum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return _mm256_max_epi16(a, b);
}

template <>
Vectorized<int8_t> inline maximum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  return _mm256_max_epi8(a, b);
}

// Unsigned compare for uint8_t lanes.
template <>
Vectorized<uint8_t> inline maximum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  return _mm256_max_epu8(a, b);
}
|
| 1055 |
+
|
| 1056 |
+
// Lane-wise clamp to [min_val, max_val]. The lower bound is applied first,
// then the upper bound, so max_val wins if the bounds cross.
template <>
Vectorized<int64_t> inline clamp(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val, const Vectorized<int64_t>& max_val) {
#ifndef CPU_CAPABILITY_AVX2
  return emulate(a, min_val, max_val, [](int64_t a_point, int64_t min_point, int64_t max_point) {return std::min(max_point, std::max(a_point, min_point));});
#else
  return minimum(maximum(a, min_val), max_val);
#endif
}

template <>
Vectorized<int32_t> inline clamp(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val, const Vectorized<int32_t>& max_val) {
  return _mm256_min_epi32(max_val, _mm256_max_epi32(a, min_val));
}

template <>
Vectorized<int16_t> inline clamp(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val, const Vectorized<int16_t>& max_val) {
  return _mm256_min_epi16(max_val, _mm256_max_epi16(a, min_val));
}

template <>
Vectorized<int8_t> inline clamp(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val, const Vectorized<int8_t>& max_val) {
  return _mm256_min_epi8(max_val, _mm256_max_epi8(a, min_val));
}

// Unsigned compare for uint8_t lanes.
template <>
Vectorized<uint8_t> inline clamp(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val, const Vectorized<uint8_t>& max_val) {
  return _mm256_min_epu8(max_val, _mm256_max_epu8(a, min_val));
}
|
| 1084 |
+
|
| 1085 |
+
// Lane-wise upper clamp: result[i] = min(a[i], max_val[i]).
template <>
Vectorized<int64_t> inline clamp_max(const Vectorized<int64_t>& a, const Vectorized<int64_t>& max_val) {
#ifndef CPU_CAPABILITY_AVX2
  return emulate(a, max_val, [](int64_t a_point, int64_t max_point) {return std::min(max_point, a_point);});
#else
  return minimum(max_val, a);
#endif
}

template <>
Vectorized<int32_t> inline clamp_max(const Vectorized<int32_t>& a, const Vectorized<int32_t>& max_val) {
  return _mm256_min_epi32(max_val, a);
}

template <>
Vectorized<int16_t> inline clamp_max(const Vectorized<int16_t>& a, const Vectorized<int16_t>& max_val) {
  return _mm256_min_epi16(max_val, a);
}

template <>
Vectorized<int8_t> inline clamp_max(const Vectorized<int8_t>& a, const Vectorized<int8_t>& max_val) {
  return _mm256_min_epi8(max_val, a);
}

// Unsigned compare for uint8_t lanes.
template <>
Vectorized<uint8_t> inline clamp_max(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& max_val) {
  return _mm256_min_epu8(max_val, a);
}
|
| 1113 |
+
|
| 1114 |
+
// Lane-wise lower clamp: result[i] = max(a[i], min_val[i]).
template <>
Vectorized<int64_t> inline clamp_min(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val) {
#ifndef CPU_CAPABILITY_AVX2
  return emulate(a, min_val, [](int64_t a_point, int64_t min_point) {return std::max(min_point, a_point);});
#else
  return maximum(min_val, a);
#endif
}

template <>
Vectorized<int32_t> inline clamp_min(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val) {
  return _mm256_max_epi32(min_val, a);
}

template <>
Vectorized<int16_t> inline clamp_min(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val) {
  return _mm256_max_epi16(min_val, a);
}

template <>
Vectorized<int8_t> inline clamp_min(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val) {
  return _mm256_max_epi8(min_val, a);
}

// Unsigned compare for uint8_t lanes.
template <>
Vectorized<uint8_t> inline clamp_min(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val) {
  return _mm256_max_epu8(min_val, a);
}
|
| 1142 |
+
|
| 1143 |
+
// Load eight values from ptr and widen them to a vector of eight int32_t.
// Generic path assumes the data is already 32-bit and loads it directly.
template<typename T>
Vectorized<int32_t> inline convert_to_int32(const T* ptr) {
  return Vectorized<int32_t>::loadu(ptr);
}

// int8_t source: load 8 bytes (64 bits) and sign-extend each to 32 bits.
template<>
Vectorized<int32_t> inline convert_to_int32<int8_t>(const int8_t* ptr) {
  return _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)));
}

// uint8_t source: load 8 bytes and zero-extend each to 32 bits.
template<>
Vectorized<int32_t> inline convert_to_int32<uint8_t>(const uint8_t* ptr) {
  return _mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)));
}
|
| 1157 |
+
|
| 1158 |
+
// Integer division has no vector instruction on AVX2, so all widths fall
// back to the scalar lane-wise helper. Division by zero in any lane is
// undefined behavior, as with scalar integer division.
template <>
Vectorized<int64_t> inline operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  return int_elementwise_binary_256(a, b, std::divides<int64_t>());
}
template <>
Vectorized<int32_t> inline operator/(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return int_elementwise_binary_256(a, b, std::divides<int32_t>());
}
template <>
Vectorized<int16_t> inline operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return int_elementwise_binary_256(a, b, std::divides<int16_t>());
}
template <>
Vectorized<int8_t> inline operator/(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  return int_elementwise_binary_256(a, b, std::divides<int8_t>());
}
template <>
Vectorized<uint8_t> inline operator/(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  return int_elementwise_binary_256(a, b, std::divides<uint8_t>());
}
|
| 1178 |
+
|
| 1179 |
+
// Bitwise ops, restricted via enable_if to the integer vector types
// (those whose Vectorized<T> derives from Vectorizedi). Lane width is
// irrelevant for these since they operate on all 256 bits at once.
template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
  return _mm256_and_si256(a, b);
}
template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
  return _mm256_or_si256(a, b);
}
template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
  return _mm256_xor_si256(a, b);
}
// Bitwise NOT, implemented as XOR with all-ones.
template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
inline Vectorized<T> operator~(const Vectorized<T>& a) {
  return _mm256_xor_si256(a, _mm256_set1_epi32(-1));
}
|
| 1195 |
+
|
| 1196 |
+
// Boolean-valued comparisons: the comparison operators produce all-ones
// masks per matching lane; eq/ne/gt/ge/lt/le normalize those masks to
// 0/1 by AND-ing with a vector of ones.

// --- int64_t ---
inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
  return (*this == other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
  return (*this != other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
  return (*this > other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
  return (*this >= other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
  return (*this < other) & Vectorized<int64_t>(1);
}

inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
  return (*this <= other) & Vectorized<int64_t>(1);
}

// --- int32_t ---
inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
  return (*this == other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
  return (*this != other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
  return (*this > other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
  return (*this >= other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
  return (*this < other) & Vectorized<int32_t>(1);
}

inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
  return (*this <= other) & Vectorized<int32_t>(1);
}

// --- int16_t ---
inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
  return (*this == other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
  return (*this != other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
  return (*this > other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
  return (*this >= other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
  return (*this < other) & Vectorized<int16_t>(1);
}

inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
  return (*this <= other) & Vectorized<int16_t>(1);
}

// --- int8_t ---
inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
  return (*this == other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
  return (*this != other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
  return (*this > other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
  return (*this >= other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
  return (*this < other) & Vectorized<int8_t>(1);
}

inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
  return (*this <= other) & Vectorized<int8_t>(1);
}

// --- uint8_t ---
inline Vectorized<uint8_t> Vectorized<uint8_t>::eq(const Vectorized<uint8_t>& other) const {
  return (*this == other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::ne(const Vectorized<uint8_t>& other) const {
  return (*this != other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::gt(const Vectorized<uint8_t>& other) const {
  return (*this > other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::ge(const Vectorized<uint8_t>& other) const {
  return (*this >= other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::lt(const Vectorized<uint8_t>& other) const {
  return (*this < other) & Vectorized<uint8_t>(1);
}

inline Vectorized<uint8_t> Vectorized<uint8_t>::le(const Vectorized<uint8_t>& other) const {
  return (*this <= other) & Vectorized<uint8_t>(1);
}
|
| 1315 |
+
|
| 1316 |
+
// Per-lane variable shift of 16-bit elements (left when left_shift is true,
// arithmetic right otherwise), built from 32-bit variable-shift intrinsics.
template <bool left_shift>
Vectorized<int16_t> inline shift_256_16(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  // No vector instruction for shifting int16_t, so emulating it instead.

  // Control masks for shuffle operation, treating 256 bits as an
  // array of 16-bit elements, and considering pairs of neighboring
  // elements. Specifically, a mask named "ctl_M_N" (M,N in [0,1], and
  // M!=N) is set so that shuffle will move element with index M from
  // input pair into element with index N in output pair, and element
  // with index M in output pair will be set to all 0s.
  __m256i ctl_0_1 = _mm256_set_epi8(29, 28, 0x80, 0x80, 25, 24, 0x80, 0x80,
                                    21, 20, 0x80, 0x80, 17, 16, 0x80, 0x80,
                                    13, 12, 0x80, 0x80, 9, 8, 0x80, 0x80,
                                    5, 4, 0x80, 0x80, 1, 0, 0x80, 0x80);
  __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 31, 30, 0x80, 0x80, 27, 26,
                                    0x80, 0x80, 23, 22, 0x80, 0x80, 19, 18,
                                    0x80, 0x80, 15, 14, 0x80, 0x80, 11, 10,
                                    0x80, 0x80, 7, 6, 0x80, 0x80, 3, 2);

  // Masks for bitwise and operation, treating 256 bits as an array of
  // 16-bit elements, and considering them in pairs of neighboring
  // elements. A mask named "keep_M" (M in [0,1]) is set so that
  // bitwise and will copy element with index M from input pair into
  // element with the same index in output pair, while the other
  // element in output pair will be set to all 0s.
  __m256i keep_0 = _mm256_set1_epi32(0xFFFF);
  __m256i keep_1 = _mm256_set1_epi32(0xFFFF0000);

  // Take each 16-bit element with idx%2==0 from input array to be
  // shifted and extend it to 32 bits so that 0s are added to the
  // right. Then, perform shifting on this 32-bit number. Upper 16
  // bits will be proper result of shifting original 16-bit number, so
  // write them to result array, into the same position from which
  // corresponding input element is taken. Also, make sure that
  // result array elements with idx%2!=0 are set to all 0s.
  //
  // Note that number of bits to shift for is extended to 32 bits by
  // adding 0s to the left. That means this number is not properly
  // sign-extended for negative values. However, number of bits to
  // shift is treated as an unsigned integer by respective shift
  // intrinsics anyway so if negative then either with or without
  // proper sign extension, it will be interpreted as a number greater
  // than 32, and the shifting result will be the same.
  __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_1);
  __m256i b0 = _mm256_and_si256(b, keep_0);
  __m256i c0;
  if (left_shift)
    c0 = _mm256_sllv_epi32(a0, b0);
  else
    c0 = _mm256_srav_epi32(a0, b0);
  c0 = _mm256_shuffle_epi8(c0, ctl_1_0);

  // Perform shifting the same way for input array elements with
  // idx%2==1.
  __m256i a1 = _mm256_and_si256(a, keep_1);
  __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
  __m256i c1;
  if (left_shift)
    c1 = _mm256_sllv_epi32(a1, b1);
  else
    c1 = _mm256_srav_epi32(a1, b1);
  c1 = _mm256_and_si256(c1, keep_1);

  // Merge partial results into the final result.
  __m256i c = _mm256_or_si256(c0, c1);

  return c;
}
|
| 1384 |
+
|
| 1385 |
+
// Per-lane variable shift of 8-bit elements (left when left_shift is true;
// right shift is arithmetic for int8_t and logical for uint8_t), built from
// 32-bit variable-shift intrinsics, processing one byte of each quadruple
// at a time.
template <bool left_shift, typename T, typename std::enable_if_t<std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>, int> = 0>
Vectorized<T> inline shift_256_8(const Vectorized<T>& a, const Vectorized<T>& b) {
  // No vector instruction for shifting int8_t/uint8_t, so emulating
  // it instead.

  // Control masks for shuffle operation, treating 256 bits as an
  // array of 8-bit elements, and considering quadruples of
  // neighboring elements. Specifically, a mask named "ctl_M_N" (M,N
  // in [0,1,2,3], and M!=N) is set so that shuffle will move element
  // with index M from input quadruple into element with index N in
  // output quadruple, and other elements in output quadruple will be
  // set to all 0s.
  __m256i ctl_0_3 = _mm256_set_epi8(28, 0x80, 0x80, 0x80, 24, 0x80, 0x80, 0x80,
                                    20, 0x80, 0x80, 0x80, 16, 0x80, 0x80, 0x80,
                                    12, 0x80, 0x80, 0x80, 8, 0x80, 0x80, 0x80,
                                    4, 0x80, 0x80, 0x80, 0, 0x80, 0x80, 0x80);
  __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 29, 0x80, 0x80, 0x80, 25,
                                    0x80, 0x80, 0x80, 21, 0x80, 0x80, 0x80, 17,
                                    0x80, 0x80, 0x80, 13, 0x80, 0x80, 0x80, 9,
                                    0x80, 0x80, 0x80, 5, 0x80, 0x80, 0x80, 1);
  __m256i ctl_1_3 = _mm256_set_epi8(29, 0x80, 0x80, 0x80, 25, 0x80, 0x80, 0x80,
                                    21, 0x80, 0x80, 0x80, 17, 0x80, 0x80, 0x80,
                                    13, 0x80, 0x80, 0x80, 9, 0x80, 0x80, 0x80,
                                    5, 0x80, 0x80, 0x80, 1, 0x80, 0x80, 0x80);
  __m256i ctl_2_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 30, 0x80, 0x80, 0x80, 26,
                                    0x80, 0x80, 0x80, 22, 0x80, 0x80, 0x80, 18,
                                    0x80, 0x80, 0x80, 14, 0x80, 0x80, 0x80, 10,
                                    0x80, 0x80, 0x80, 6, 0x80, 0x80, 0x80, 2);
  __m256i ctl_2_3 = _mm256_set_epi8(30, 0x80, 0x80, 0x80, 26, 0x80, 0x80, 0x80,
                                    22, 0x80, 0x80, 0x80, 18, 0x80, 0x80, 0x80,
                                    14, 0x80, 0x80, 0x80, 10, 0x80, 0x80, 0x80,
                                    6, 0x80, 0x80, 0x80, 2, 0x80, 0x80, 0x80);
  __m256i ctl_3_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 31, 0x80, 0x80, 0x80, 27,
                                    0x80, 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19,
                                    0x80, 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11,
                                    0x80, 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3);
  __m256i ctl_3_1 = _mm256_set_epi8(0x80, 0x80, 31, 0x80, 0x80, 0x80, 27, 0x80,
                                    0x80, 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80,
                                    0x80, 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80,
                                    0x80, 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80);
  __m256i ctl_3_2 = _mm256_set_epi8(0x80, 31, 0x80, 0x80, 0x80, 27, 0x80, 0x80,
                                    0x80, 23, 0x80, 0x80, 0x80, 19, 0x80, 0x80,
                                    0x80, 15, 0x80, 0x80, 0x80, 11, 0x80, 0x80,
                                    0x80, 7, 0x80, 0x80, 0x80, 3, 0x80, 0x80);

  // Masks for bitwise and operation, treating 256 bits as an array of
  // 8-bit elements, and considering them in quadruples of neighboring
  // elements. A mask named "keep_M" (M in [0,1,2,3]) is set so that
  // bitwise and will copy element with index M from input quadruple
  // into element with the same index in output quadruple, while the
  // other elements in output quadruple will be set to all 0s.
  __m256i keep_0 = _mm256_set1_epi32(0xFF);
  __m256i keep_3 = _mm256_set1_epi32(0xFF000000);

  // Take each 8-bit element with idx%4==0 from input array to be
  // shifted and extend it to 32 bits so that 0s are added to the
  // right. Then, perform shifting on this 32-bit number. Upper 8
  // bits will be proper result of shifting original 8-bit number, so
  // write them to result array, into the same position from which
  // corresponding input element is taken. Also, make sure that
  // result array elements with idx%4!=0 are set to all 0s.
  //
  // Note that number of bits to shift for is extended to 32 bits by
  // adding 0s to the left. That means this number is not properly
  // sign-extended for negative values. However, number of bits to
  // shift is treated as an unsigned integer by respective shift
  // intrinsics anyway so if negative then either with or without
  // proper sign extension, it will be interpreted as a number greater
  // than 32, and the shifting result will be the same.
  __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_3);
  __m256i b0 = _mm256_and_si256(b, keep_0);
  __m256i c0;
  if (left_shift)
    c0 = _mm256_sllv_epi32(a0, b0);
  else
    // Right shift: arithmetic for signed lanes, logical for unsigned.
    if constexpr (std::is_same_v<T, int8_t>)
      c0 = _mm256_srav_epi32(a0, b0);
    else
      c0 = _mm256_srlv_epi32(a0, b0);
  c0 = _mm256_shuffle_epi8(c0, ctl_3_0);

  // Perform shifting the same way for input array elements with
  // idx%4==1.
  __m256i a1 = _mm256_shuffle_epi8(a, ctl_1_3);
  __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
  __m256i c1;
  if (left_shift)
    c1 = _mm256_sllv_epi32(a1, b1);
  else
    if constexpr (std::is_same_v<T, int8_t>)
      c1 = _mm256_srav_epi32(a1, b1);
    else
      c1 = _mm256_srlv_epi32(a1, b1);
  c1 = _mm256_shuffle_epi8(c1, ctl_3_1);

  // Perform shifting the same way for input array elements with
  // idx%4==2.
  __m256i a2 = _mm256_shuffle_epi8(a, ctl_2_3);
  __m256i b2 = _mm256_shuffle_epi8(b, ctl_2_0);
  __m256i c2;
  if (left_shift)
    c2 = _mm256_sllv_epi32(a2, b2);
  else
    if constexpr (std::is_same_v<T, int8_t>)
      c2 = _mm256_srav_epi32(a2, b2);
    else
      c2 = _mm256_srlv_epi32(a2, b2);
  c2 = _mm256_shuffle_epi8(c2, ctl_3_2);

  // Perform shifting the same way for input array elements with
  // idx%4==3.
  __m256i a3 = _mm256_and_si256(a, keep_3);
  __m256i b3 = _mm256_shuffle_epi8(b, ctl_3_0);
  __m256i c3;
  if (left_shift)
    c3 = _mm256_sllv_epi32(a3, b3);
  else
    if constexpr (std::is_same_v<T, int8_t>)
      c3 = _mm256_srav_epi32(a3, b3);
    else
      c3 = _mm256_srlv_epi32(a3, b3);
  c3 = _mm256_and_si256(c3, keep_3);

  // Merge partial results into the final result.
  __m256i c01 = _mm256_or_si256(c0, c1);
  __m256i c23 = _mm256_or_si256(c2, c3);
  __m256i c = _mm256_or_si256(c01, c23);

  return c;
}
|
| 1515 |
+
|
| 1516 |
+
// Per-lane left shift. 64- and 32-bit lanes have native variable-shift
// instructions; 16- and 8-bit lanes go through the emulation helpers above.
template <>
Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
  return _mm256_sllv_epi64(a, b);
}

template <>
Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
  return _mm256_sllv_epi32(a, b);
}

template <>
Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
  return shift_256_16<true>(a, b);
}

template <>
Vectorized<int8_t> inline operator<<(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
  return shift_256_8<true>(a, b);
}

template <>
Vectorized<uint8_t> inline operator<<(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
  return shift_256_8<true>(a, b);
}
|
| 1540 |
+
|
| 1541 |
+
template <>
|
| 1542 |
+
Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 1543 |
+
// No vector instruction for right arithmetic shifting int64_t, so emulating it
|
| 1544 |
+
// instead.
|
| 1545 |
+
|
| 1546 |
+
// Clamp the shift values such that shift values < 0 and > 64 are changed to 64
|
| 1547 |
+
// which results in -1 for negative input and 0 for non-negative input.
|
| 1548 |
+
__m256i zero = _mm256_set1_epi64x(0);
|
| 1549 |
+
__m256i max_shift = _mm256_set1_epi64x(64);
|
| 1550 |
+
__m256i mask = _mm256_or_si256(_mm256_cmpgt_epi64(zero, b), _mm256_cmpgt_epi64(b, max_shift));
|
| 1551 |
+
__m256i shift = _mm256_blendv_epi8(b, max_shift, mask);
|
| 1552 |
+
// Shift the number logically to the right, thus filling the most
|
| 1553 |
+
// significant bits with 0s. Then, replace these bits with the sign
|
| 1554 |
+
// bit.
|
| 1555 |
+
__m256i sign_bits = _mm256_cmpgt_epi64(zero, a);
|
| 1556 |
+
__m256i sign_shift = _mm256_sub_epi64(max_shift, shift);
|
| 1557 |
+
__m256i sign_ext = _mm256_sllv_epi64(sign_bits, sign_shift);
|
| 1558 |
+
__m256i c = _mm256_srlv_epi64(a, shift);
|
| 1559 |
+
c = _mm256_or_si256(c, sign_ext);
|
| 1560 |
+
|
| 1561 |
+
return c;
|
| 1562 |
+
}
|
| 1563 |
+
|
| 1564 |
+
template <>
|
| 1565 |
+
Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
|
| 1566 |
+
return _mm256_srav_epi32(a, b);
|
| 1567 |
+
}
|
| 1568 |
+
|
| 1569 |
+
template <>
|
| 1570 |
+
Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
|
| 1571 |
+
return shift_256_16<false>(a, b);
|
| 1572 |
+
}
|
| 1573 |
+
|
| 1574 |
+
template <>
|
| 1575 |
+
Vectorized<int8_t> inline operator>>(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
|
| 1576 |
+
return shift_256_8<false>(a, b);
|
| 1577 |
+
}
|
| 1578 |
+
|
| 1579 |
+
template <>
|
| 1580 |
+
Vectorized<uint8_t> inline operator>>(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
|
| 1581 |
+
return shift_256_8<false>(a, b);
|
| 1582 |
+
}
|
| 1583 |
+
|
| 1584 |
+
#endif
|
| 1585 |
+
|
| 1586 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_mask.h
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec_mask.h>
|
| 6 |
+
|
| 7 |
+
namespace at::vec {
|
| 8 |
+
inline namespace CPU_CAPABILITY {
|
| 9 |
+
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
|
| 11 |
+
|
| 12 |
+
template <typename T, typename mask_t>
|
| 13 |
+
struct VecMaskLoad<
|
| 14 |
+
T,
|
| 15 |
+
1,
|
| 16 |
+
mask_t,
|
| 17 |
+
1,
|
| 18 |
+
typename std::enable_if_t<
|
| 19 |
+
std::is_same_v<T, float> || std::is_same_v<T, int32_t> ||
|
| 20 |
+
std::is_same_v<T, uint32_t>,
|
| 21 |
+
void>> {
|
| 22 |
+
static inline VectorizedN<T, 1> apply(
|
| 23 |
+
const T* ptr,
|
| 24 |
+
const VecMask<mask_t, 1>& vec_mask) {
|
| 25 |
+
auto int_mask = vec_mask.template cast<int, 1>()[0];
|
| 26 |
+
if constexpr (std::is_same_v<T, float>) {
|
| 27 |
+
return Vectorized<T>(_mm256_maskload_ps(ptr, int_mask));
|
| 28 |
+
} else {
|
| 29 |
+
return Vectorized<T>(_mm256_maskload_epi32(ptr, int_mask));
|
| 30 |
+
}
|
| 31 |
+
}
|
| 32 |
+
};
|
| 33 |
+
|
| 34 |
+
// TODO: add specialization of VecMaskLoad for bfloat16/half and int8/uint8
|
| 35 |
+
|
| 36 |
+
template <>
|
| 37 |
+
struct VecMaskCast<float, 1, int, 1> {
|
| 38 |
+
static inline VecMask<float, 1> apply(const VecMask<int, 1>& vec_mask) {
|
| 39 |
+
return Vectorized<float>(_mm256_castsi256_ps(vec_mask[0]));
|
| 40 |
+
}
|
| 41 |
+
};
|
| 42 |
+
|
| 43 |
+
template <>
|
| 44 |
+
struct VecMaskCast<int, 1, float, 1> {
|
| 45 |
+
static inline VecMask<int, 1> apply(const VecMask<float, 1>& vec_mask) {
|
| 46 |
+
return Vectorized<int>(_mm256_castps_si256(vec_mask[0]));
|
| 47 |
+
}
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
template <typename dst_t>
|
| 51 |
+
struct VecMaskCast<dst_t, 1, int64_t, 2> {
|
| 52 |
+
static inline VecMask<dst_t, 1> apply(const VecMask<int64_t, 2>& vec_mask) {
|
| 53 |
+
auto int_vec = convert<int, 1, int64_t, 2>(VectorizedN<int64_t, 2>(vec_mask));
|
| 54 |
+
return VecMask<int, 1>(int_vec).cast<dst_t, 1>();
|
| 55 |
+
}
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
template <>
|
| 59 |
+
inline bool VecMask<int, 1>::all_zero() const {
|
| 60 |
+
return _mm256_testz_si256(mask_[0], mask_[0]);
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
template <>
|
| 64 |
+
inline bool VecMask<int, 1>::is_masked(int i) const {
|
| 65 |
+
return _mm256_movemask_ps(_mm256_castsi256_ps(mask_[0])) & (1 << i);
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
template <>
|
| 69 |
+
inline bool VecMask<int, 1>::all_masked() const {
|
| 70 |
+
int mask = _mm256_movemask_ps(_mm256_castsi256_ps(mask_[0]));
|
| 71 |
+
return mask == 0xff;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
#define VEC_MASK_METHOD_WITH_CAST_TO_INT( \
|
| 75 |
+
T, N, return_type, method, args_def, args) \
|
| 76 |
+
template <> \
|
| 77 |
+
inline return_type VecMask<T, N>::method args_def const { \
|
| 78 |
+
return cast<int, 1>().method args; \
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_zero, (), ())
|
| 82 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_zero, (), ())
|
| 83 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, is_masked, (int i), (i))
|
| 84 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, is_masked, (int i), (i))
|
| 85 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_masked, (), ())
|
| 86 |
+
VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_masked, (), ())
|
| 87 |
+
|
| 88 |
+
#undef VEC_MASK_DEFINE_METHOD_WITH_CAST_TO_INT
|
| 89 |
+
|
| 90 |
+
#endif
|
| 91 |
+
|
| 92 |
+
} // namespace CPU_CAPABILITY
|
| 93 |
+
} // namespace at::vec
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h
ADDED
|
@@ -0,0 +1,1341 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <ATen/native/quantized/AffineQuantizerBase.h>
|
| 9 |
+
|
| 10 |
+
#include <c10/util/irange.h>
|
| 11 |
+
#include <c10/util/qint32.h>
|
| 12 |
+
#include <c10/util/qint8.h>
|
| 13 |
+
#include <c10/util/quint8.h>
|
| 14 |
+
|
| 15 |
+
#include <array>
|
| 16 |
+
#include <cmath>
|
| 17 |
+
|
| 18 |
+
// This file defines Vectorized<> for the quantized types.
|
| 19 |
+
//
|
| 20 |
+
//
|
| 21 |
+
// Currently, we simply use these classes as efficient converters between
|
| 22 |
+
// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
|
| 23 |
+
// where doing the arithmetic in full-precision is acceptable (e.g.
|
| 24 |
+
// elementwise operators).
|
| 25 |
+
//
|
| 26 |
+
//
|
| 27 |
+
// Conversions are as follows:
|
| 28 |
+
// Vectorized<qint8> -> 4x Vectorized<float>
|
| 29 |
+
// Vectorized<quint8> -> 4x Vectorized<float>
|
| 30 |
+
// Vectorized<qint32> -> 1x Vectorized<float>
|
| 31 |
+
//
|
| 32 |
+
// The size of the returned float vector is specified by the special
|
| 33 |
+
// constexpr function float_num_vecs. The type of the value returned
|
| 34 |
+
// from dequantize (and expected as an argument to quantize) is
|
| 35 |
+
// specified by float_vec_return_type.
|
| 36 |
+
//
|
| 37 |
+
// When writing kernels with these vectors, it is expected that floating-
|
| 38 |
+
// point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
|
| 39 |
+
// iterations.
|
| 40 |
+
|
| 41 |
+
namespace at::vec {
|
| 42 |
+
inline namespace CPU_CAPABILITY {
|
| 43 |
+
|
| 44 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 45 |
+
|
| 46 |
+
#ifdef _MSC_VER
|
| 47 |
+
__declspec(align(64)) struct Vectorizedqi {
|
| 48 |
+
protected:
|
| 49 |
+
__m256i vals;
|
| 50 |
+
#else
|
| 51 |
+
struct Vectorizedqi {
|
| 52 |
+
protected:
|
| 53 |
+
__m256i vals __attribute__((aligned(64)));
|
| 54 |
+
#endif
|
| 55 |
+
|
| 56 |
+
public:
|
| 57 |
+
Vectorizedqi() {}
|
| 58 |
+
Vectorizedqi(__m256i v) : vals(v) {}
|
| 59 |
+
operator __m256i() const {
|
| 60 |
+
return vals;
|
| 61 |
+
}
|
| 62 |
+
};
|
| 63 |
+
|
| 64 |
+
template <typename T>
|
| 65 |
+
__m256i pack_saturate_and_clamp(
|
| 66 |
+
__m256i first,
|
| 67 |
+
__m256i second,
|
| 68 |
+
T min_val,
|
| 69 |
+
T max_val);
|
| 70 |
+
|
| 71 |
+
template <>
|
| 72 |
+
inline __m256i pack_saturate_and_clamp<int32_t>(
|
| 73 |
+
__m256i /*first*/,
|
| 74 |
+
__m256i /*second*/,
|
| 75 |
+
int32_t /*min_val*/,
|
| 76 |
+
int32_t /*max_val*/) {
|
| 77 |
+
// This function is for linkage only, will not be used
|
| 78 |
+
AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
template <>
|
| 82 |
+
inline __m256i pack_saturate_and_clamp<int8_t>(
|
| 83 |
+
__m256i first,
|
| 84 |
+
__m256i second,
|
| 85 |
+
int8_t min_val,
|
| 86 |
+
int8_t max_val) {
|
| 87 |
+
__m256i packed_and_sat = _mm256_packs_epi16(first, second);
|
| 88 |
+
return _mm256_max_epi8(
|
| 89 |
+
_mm256_set1_epi8(min_val),
|
| 90 |
+
_mm256_min_epi8(packed_and_sat, _mm256_set1_epi8(max_val)));
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
template <>
|
| 94 |
+
inline __m256i pack_saturate_and_clamp<uint8_t>(
|
| 95 |
+
__m256i first,
|
| 96 |
+
__m256i second,
|
| 97 |
+
uint8_t min_val,
|
| 98 |
+
uint8_t max_val) {
|
| 99 |
+
__m256i packed_and_sat = _mm256_packus_epi16(first, second);
|
| 100 |
+
return _mm256_max_epu8(
|
| 101 |
+
_mm256_set1_epi8(min_val),
|
| 102 |
+
_mm256_min_epu8(packed_and_sat, _mm256_set1_epi8(max_val)));
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
template <typename T>
|
| 106 |
+
typename std::enable_if_t<std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>, at::vec::Vectorized<float>>
|
| 107 |
+
inline convert_int8_to_float(at::vec::Vectorized<T> src) {
|
| 108 |
+
// Note: this function only convert inputs number of elements equal to at::vec::Vectorized<float>.size()
|
| 109 |
+
// Only handle first 8*8 bits
|
| 110 |
+
__m128i input_128 = _mm256_castsi256_si128(src);
|
| 111 |
+
// Convert from 8*uint8/int8 to 8*int32
|
| 112 |
+
__m256i input_256_int32;
|
| 113 |
+
if constexpr (std::is_same_v<T, uint8_t>)
|
| 114 |
+
input_256_int32 = _mm256_cvtepu8_epi32(input_128);
|
| 115 |
+
else
|
| 116 |
+
input_256_int32 = _mm256_cvtepi8_epi32(input_128);
|
| 117 |
+
// Convert from 8*int32 to 8*float
|
| 118 |
+
return _mm256_cvtepi32_ps(input_256_int32);
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
template <typename T>
|
| 122 |
+
typename std::enable_if_t<std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>, at::vec::Vectorized<T>>
|
| 123 |
+
inline convert_float_to_int8(at::vec::Vectorized<float> src) {
|
| 124 |
+
// Convert from float32 to int32 with truncation
|
| 125 |
+
__m256i x_values_int32 = _mm256_cvttps_epi32(src);
|
| 126 |
+
|
| 127 |
+
// Convert from int32 to int16 using signed saturation
|
| 128 |
+
__m256i xy_packed_v = _mm256_packs_epi32(x_values_int32, x_values_int32);
|
| 129 |
+
|
| 130 |
+
constexpr auto min_val = std::numeric_limits<T>::min();
|
| 131 |
+
constexpr auto max_val = std::numeric_limits<T>::max();
|
| 132 |
+
|
| 133 |
+
// Convert from int16 to uint8/int8 using unsigned saturation
|
| 134 |
+
__m256i xyzw_clamped_v = pack_saturate_and_clamp<T>(
|
| 135 |
+
xy_packed_v, xy_packed_v, min_val, max_val);
|
| 136 |
+
__m256i permute_mask_v =
|
| 137 |
+
_mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
|
| 138 |
+
return _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
template <typename T>
|
| 142 |
+
__FORCE_INLINE void QuantizeAvx2(
|
| 143 |
+
const float* src,
|
| 144 |
+
T* dst,
|
| 145 |
+
int len,
|
| 146 |
+
float inverse_scale,
|
| 147 |
+
int64_t zero_point) {
|
| 148 |
+
constexpr int VLEN = 8;
|
| 149 |
+
constexpr auto min_val = std::numeric_limits<T>::min();
|
| 150 |
+
constexpr auto max_val = std::numeric_limits<T>::max();
|
| 151 |
+
const __m256i min_v = _mm256_set1_epi32(min_val);
|
| 152 |
+
const __m256i max_v = _mm256_set1_epi32(max_val);
|
| 153 |
+
// This is the largest int32 value < int32_max exactly representable in float
|
| 154 |
+
constexpr int32_t int32_float_max_val =
|
| 155 |
+
std::numeric_limits<int32_t>::max() - 127;
|
| 156 |
+
int i = 0;
|
| 157 |
+
__m256 inverse_scale_v = _mm256_set1_ps(inverse_scale);
|
| 158 |
+
// clang-format off
|
| 159 |
+
static const __m256i shuffle_mask_v = _mm256_set_epi8(
|
| 160 |
+
0xff, 0xff, 0xff, 0xff,
|
| 161 |
+
0xff, 0xff, 0xff, 0xff,
|
| 162 |
+
0xff, 0xff, 0xff, 0xff,
|
| 163 |
+
0x0c, 0x08, 0x04, 0x00,
|
| 164 |
+
0xff, 0xff, 0xff, 0xff,
|
| 165 |
+
0xff, 0xff, 0xff, 0xff,
|
| 166 |
+
0xff, 0xff, 0xff, 0xff,
|
| 167 |
+
0x0c, 0x08, 0x04, 0x00);
|
| 168 |
+
// clang-format on
|
| 169 |
+
__m256i permute_mask_v =
|
| 170 |
+
_mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
|
| 171 |
+
__m256i permute_mask_l8_v =
|
| 172 |
+
_mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00);
|
| 173 |
+
int len_aligned = len / (VLEN * 4) * (VLEN * 4);
|
| 174 |
+
for (; i < len_aligned; i += 4 * VLEN) {
|
| 175 |
+
// x
|
| 176 |
+
__m256 x_vals = _mm256_load_ps(src + i);
|
| 177 |
+
__m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
|
| 178 |
+
// If the floating point value is greater than int32_max,
|
| 179 |
+
// _mm256_cvtps_epi32 converts them to -ve. Clip at int32_float_max_val to
|
| 180 |
+
// Clip at int32_float_max_val to avoid this.
|
| 181 |
+
x_transformed_v =
|
| 182 |
+
_mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
|
| 183 |
+
// y
|
| 184 |
+
__m256 y_vals = _mm256_load_ps(src + i + VLEN);
|
| 185 |
+
__m256 y_transformed_v = _mm256_mul_ps(y_vals, inverse_scale_v);
|
| 186 |
+
y_transformed_v =
|
| 187 |
+
_mm256_min_ps(y_transformed_v, _mm256_set1_ps(int32_float_max_val));
|
| 188 |
+
// z
|
| 189 |
+
__m256 z_vals = _mm256_load_ps(src + i + 2 * VLEN);
|
| 190 |
+
__m256 z_transformed_v = _mm256_mul_ps(z_vals, inverse_scale_v);
|
| 191 |
+
z_transformed_v =
|
| 192 |
+
_mm256_min_ps(z_transformed_v, _mm256_set1_ps(int32_float_max_val));
|
| 193 |
+
// w
|
| 194 |
+
__m256 w_vals = _mm256_load_ps(src + i + 3 * VLEN);
|
| 195 |
+
__m256 w_transformed_v = _mm256_mul_ps(w_vals, inverse_scale_v);
|
| 196 |
+
w_transformed_v =
|
| 197 |
+
_mm256_min_ps(w_transformed_v, _mm256_set1_ps(int32_float_max_val));
|
| 198 |
+
|
| 199 |
+
__m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
|
| 200 |
+
__m256i y_rounded_v = _mm256_cvtps_epi32(y_transformed_v);
|
| 201 |
+
__m256i z_rounded_v = _mm256_cvtps_epi32(z_transformed_v);
|
| 202 |
+
__m256i w_rounded_v = _mm256_cvtps_epi32(w_transformed_v);
|
| 203 |
+
|
| 204 |
+
// add zero point
|
| 205 |
+
x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
|
| 206 |
+
y_rounded_v = _mm256_add_epi32(y_rounded_v, _mm256_set1_epi32(zero_point));
|
| 207 |
+
z_rounded_v = _mm256_add_epi32(z_rounded_v, _mm256_set1_epi32(zero_point));
|
| 208 |
+
w_rounded_v = _mm256_add_epi32(w_rounded_v, _mm256_set1_epi32(zero_point));
|
| 209 |
+
|
| 210 |
+
__m256i xy_packed_v = _mm256_packs_epi32(x_rounded_v, y_rounded_v);
|
| 211 |
+
__m256i zw_packed_v = _mm256_packs_epi32(z_rounded_v, w_rounded_v);
|
| 212 |
+
__m256i xyzw_clamped_v =
|
| 213 |
+
pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
|
| 214 |
+
|
| 215 |
+
xyzw_clamped_v =
|
| 216 |
+
_mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
|
| 217 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(dst + i), xyzw_clamped_v);
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
// Additional 8-lane AVX2 version to take advantage when len is smaller
|
| 221 |
+
// based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
|
| 222 |
+
for (; i < len / VLEN * VLEN; i += VLEN) {
|
| 223 |
+
__m256 x_vals = _mm256_load_ps(src + i);
|
| 224 |
+
__m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
|
| 225 |
+
x_transformed_v =
|
| 226 |
+
_mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
|
| 227 |
+
__m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
|
| 228 |
+
x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
|
| 229 |
+
__m256i x_clipped_v =
|
| 230 |
+
_mm256_max_epi32(min_v, _mm256_min_epi32(max_v, x_rounded_v));
|
| 231 |
+
|
| 232 |
+
x_clipped_v = _mm256_shuffle_epi8(x_clipped_v, shuffle_mask_v);
|
| 233 |
+
x_clipped_v = _mm256_permutevar8x32_epi32(x_clipped_v, permute_mask_l8_v);
|
| 234 |
+
_mm_storel_epi64(
|
| 235 |
+
reinterpret_cast<__m128i*>(dst + i),
|
| 236 |
+
_mm256_castsi256_si128(x_clipped_v));
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
for (; i < len; ++i) {
|
| 240 |
+
float transformed = src[i] * inverse_scale;
|
| 241 |
+
|
| 242 |
+
// Not exactly the same behavior as the vectorized code.
|
| 243 |
+
// The vectorized code above always rounds to even in halfway cases
|
| 244 |
+
// (https://software.intel.com/en-us/node/523819), but std::nearbyint
|
| 245 |
+
// does the same only when the current rounding mode is FE_TONEAREST.
|
| 246 |
+
// However, in practice, this should not be a problem because most cases
|
| 247 |
+
// use the default rounding mode FE_TONEAREST.
|
| 248 |
+
// Note that we cannot implement the same behavior as the vectorized code
|
| 249 |
+
// using std::round because it does rounding away from zero in halfway
|
| 250 |
+
// cases.
|
| 251 |
+
transformed = zero_point + std::nearbyint(transformed);
|
| 252 |
+
float clipped =
|
| 253 |
+
std::min(std::max(transformed, float(min_val)), float(max_val));
|
| 254 |
+
dst[i] = clipped;
|
| 255 |
+
}
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
template<>
|
| 259 |
+
struct Vectorized<c10::qint32> : public Vectorizedqi {
|
| 260 |
+
using size_type = int;
|
| 261 |
+
static constexpr size_type size() {
|
| 262 |
+
return 8;
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
static constexpr int float_num_vecs() {
|
| 266 |
+
return 1;
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
static constexpr int int_num_vecs() {
|
| 270 |
+
return 1;
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
using float_vec_return_type = std::array<Vectorized<float>, 1>;
|
| 274 |
+
using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
|
| 275 |
+
using value_type = c10::qint32::underlying;
|
| 276 |
+
|
| 277 |
+
public:
|
| 278 |
+
using Vectorizedqi::Vectorizedqi;
|
| 279 |
+
Vectorized() {}
|
| 280 |
+
|
| 281 |
+
Vectorized(__m256i vals_) { vals = vals_;}
|
| 282 |
+
|
| 283 |
+
// Broadcast constructor
|
| 284 |
+
Vectorized(const c10::qint32& val) {
|
| 285 |
+
value_type uw = val.val_;
|
| 286 |
+
vals = _mm256_set1_epi32(uw);
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
void store(void* ptr, int count = size()) const {
|
| 290 |
+
if (count != size()) {
|
| 291 |
+
memcpy(ptr, &vals, count * sizeof(value_type));
|
| 292 |
+
} else {
|
| 293 |
+
_mm256_storeu_si256((__m256i*)ptr, vals);
|
| 294 |
+
}
|
| 295 |
+
}
|
| 296 |
+
|
| 297 |
+
static Vectorized<c10::qint32> loadu(const void* ptr) {
|
| 298 |
+
return Vectorized<c10::qint32>(ptr);
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
|
| 302 |
+
__at_align__ value_type tmp_values[size()];
|
| 303 |
+
// Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
|
| 304 |
+
// for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
|
| 305 |
+
// instructions while a loop would be compiled to one instruction.
|
| 306 |
+
for (const auto i : c10::irange(size())) {
|
| 307 |
+
tmp_values[i] = 0;
|
| 308 |
+
}
|
| 309 |
+
std::memcpy(
|
| 310 |
+
tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
|
| 311 |
+
return _mm256_loadu_si256((const __m256i*)tmp_values);
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
float_vec_return_type dequantize(
|
| 315 |
+
Vectorized<float> scale,
|
| 316 |
+
Vectorized<float> /*zero_point*/,
|
| 317 |
+
Vectorized<float> scale_zp_premul) const {
|
| 318 |
+
__m256 float_vals = _mm256_cvtepi32_ps(vals);
|
| 319 |
+
return {vec::fmadd(scale, Vectorized<float>(float_vals), scale_zp_premul)};
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
float_vec_return_type dequantize(
|
| 323 |
+
Vectorized<float> scale,
|
| 324 |
+
Vectorized<float> zero_point) const {
|
| 325 |
+
__m256 float_vals = _mm256_cvtepi32_ps(vals);
|
| 326 |
+
return {(Vectorized<float>(float_vals) - zero_point) * scale};
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
static Vectorized<c10::qint32> quantize(
|
| 330 |
+
const float_vec_return_type& rhs,
|
| 331 |
+
float scale,
|
| 332 |
+
int32_t zero_point,
|
| 333 |
+
float /*inverse_scale*/) {
|
| 334 |
+
Vectorized<c10::qint32> retval;
|
| 335 |
+
auto rhs_data = (__m256)rhs[0];
|
| 336 |
+
at::native::quantize_vec<c10::qint32, /*precision=*/32>(
|
| 337 |
+
scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 8);
|
| 338 |
+
return retval;
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
|
| 342 |
+
return _mm256_max_epi32(vals, b.vals);
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
|
| 346 |
+
return _mm256_min_epi32(vals, b.vals);
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
|
| 350 |
+
return maximum(zero_point);
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
Vectorized<c10::qint32> relu6(
|
| 354 |
+
Vectorized<c10::qint32> zero_point,
|
| 355 |
+
Vectorized<c10::qint32> q_six) {
|
| 356 |
+
return _mm256_min_epi32(
|
| 357 |
+
_mm256_max_epi32(vals, zero_point.vals), q_six.vals);
|
| 358 |
+
}
|
| 359 |
+
|
| 360 |
+
int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
|
| 361 |
+
return {_mm256_sub_epi32(vals, b)};
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
static Vectorized<c10::qint32> requantize_from_int(
|
| 365 |
+
const int_vec_return_type& inp,
|
| 366 |
+
float multiplier,
|
| 367 |
+
int32_t zero_point) {
|
| 368 |
+
__m256 multiplier_v = _mm256_set1_ps(multiplier);
|
| 369 |
+
__m256i zero_point_v = _mm256_set1_epi32(zero_point);
|
| 370 |
+
|
| 371 |
+
__m256 scaled = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier_v);
|
| 372 |
+
__m256i rounded = _mm256_cvtps_epi32(scaled);
|
| 373 |
+
return _mm256_add_epi32(rounded, zero_point_v);
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
private:
|
| 377 |
+
// Load from memory constructor
|
| 378 |
+
Vectorized(const void* ptr) {
|
| 379 |
+
vals = _mm256_loadu_si256((const __m256i*)ptr);
|
| 380 |
+
}
|
| 381 |
+
};
|
| 382 |
+
|
| 383 |
+
template <>
|
| 384 |
+
Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 385 |
+
return a.maximum(b);
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
template <>
|
| 389 |
+
Vectorized<c10::qint32> inline operator*(
|
| 390 |
+
const Vectorized<c10::qint32>& a,
|
| 391 |
+
const Vectorized<c10::qint32>& b) {
|
| 392 |
+
return _mm256_mullo_epi32(a, b);
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
template <>
|
| 396 |
+
Vectorized<c10::qint32> inline operator+(
|
| 397 |
+
const Vectorized<c10::qint32>& a,
|
| 398 |
+
const Vectorized<c10::qint32>& b) {
|
| 399 |
+
return _mm256_add_epi32(a, b);
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
/*
|
| 403 |
+
* Convert values from int32 back to int8/uint8
|
| 404 |
+
*/
|
| 405 |
+
// Requantize four int32 vectors (32 values) down to one vector of 32
// int8/uint8 values: out = saturate(round(int32 * multiplier) + zp).
// The pack instructions interleave 128-bit halves, so a final permute
// restores element order.
template <typename T>
__m256i RequantizeAvx2(
    const std::array<Vectorized<c10::qint32>, 4>& inp,
    __m256 multiplier,
    __m256i zp) {
  static_assert(
      std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>,
      "Only int8_t/uint8_t are supported");
  constexpr auto min_val = std::numeric_limits<T>::min();
  constexpr auto max_val = std::numeric_limits<T>::max();
  __m256i permute_mask_v =
      _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
  // Scale each group of 8 lanes by the multiplier in float.
  __m256 x_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier);
  __m256 y_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[1]), multiplier);
  __m256 z_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[2]), multiplier);
  __m256 w_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[3]), multiplier);

  // Round back to int32 (current MXCSR rounding mode; nearest-even default).
  __m256i x_rounded_v = _mm256_cvtps_epi32(x_scaled_v);
  __m256i y_rounded_v = _mm256_cvtps_epi32(y_scaled_v);
  __m256i z_rounded_v = _mm256_cvtps_epi32(z_scaled_v);
  __m256i w_rounded_v = _mm256_cvtps_epi32(w_scaled_v);

  /* Add zero point */
  __m256i x_v = _mm256_add_epi32(x_rounded_v, zp);
  __m256i y_v = _mm256_add_epi32(y_rounded_v, zp);
  __m256i z_v = _mm256_add_epi32(z_rounded_v, zp);
  __m256i w_v = _mm256_add_epi32(w_rounded_v, zp);

  /* Pack to int16_t and saturate */
  __m256i xy_packed_v = _mm256_packs_epi32(x_v, y_v);
  __m256i zw_packed_v = _mm256_packs_epi32(z_v, w_v);

  // Pack to 8-bit and clamp into [min_val, max_val] for the target type.
  __m256i xyzw_clamped_v =
      pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);

  /*
   * xyzw_clamped_v has results in the following layout so we need to
   * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7
   */
  xyzw_clamped_v = _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
  return xyzw_clamped_v;
}
|
| 447 |
+
|
| 448 |
+
// AVX2 specialization holding 32 quantized signed 8-bit values in one
// 256-bit register.
template<>
struct Vectorized<c10::qint8> : public Vectorizedqi {
  static constexpr int size() {
    return 32;
  }

  // Dequantizing 32 int8 lanes produces 4 float vectors of 8 lanes each.
  static constexpr int float_num_vecs() {
    return 4;
  }

  // Widening 32 int8 lanes produces 4 int32 vectors of 8 lanes each.
  static constexpr int int_num_vecs() {
    return 4;
  }

  using float_vec_return_type = std::array<Vectorized<float>, 4>;
  using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
  using value_type = typename c10::qint8::underlying;

 public:
  using Vectorizedqi::Vectorizedqi;

  Vectorized() {}
  Vectorized(__m256i vals_) { vals = vals_;}

  // Broadcast constructor: replicate one quantized value across all 32 lanes.
  Vectorized(const c10::qint8& val) {
    value_type uw = val.val_;
    vals = _mm256_set1_epi8(uw);
  }

  // This is needed because the compiler emits awful code for the default
  // constructor for moving the enum
  // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
  C10_CLANG_DIAGNOSTIC_PUSH()
  #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy")
  C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy")
  #endif
  Vectorized(const Vectorized<c10::qint8>& other) : Vectorizedqi(other.vals) { }
  C10_CLANG_DIAGNOSTIC_POP()

  // Store `count` lanes to unaligned memory: full 256-bit store when
  // count == size(), element-wise memcpy for partial stores.
  void store(void* ptr, int count = size()) const {
    if (count != size()) {
      memcpy(ptr, &vals, count * sizeof(value_type));
    } else {
      _mm256_storeu_si256((__m256i*)ptr, vals);
    }
  }

  static Vectorized<c10::qint8> loadu(const void* ptr) {
    return Vectorized<c10::qint8>(ptr);
  }

  // Partial load: copy `count` elements into a zeroed aligned buffer first so
  // the trailing lanes have a deterministic value.
  static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
    __at_align__ value_type tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(
        tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
    return _mm256_loadu_si256((const __m256i*)tmp_values);
  }

 private:
  // Sign-extend 8 int8 lanes (low 64 bits of the input) to 8 int32 lanes.
  __m256i cvtepi8_epi32(__m128i epi8_vals) const {
    return _mm256_cvtepi8_epi32(epi8_vals);
  }

 public:
  // Dequantize using a precomputed term: scale * float(q) + scale_neg_zp_premul,
  // done 8 lanes at a time over the four 64-bit chunks of the register.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> /*zero_point*/,
      Vectorized<float> scale_neg_zp_premul) const {
    __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
    __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
    __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
    __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));

    __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0));
    __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1));
    __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2));
    __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3));

    auto val0 =
        vec::fmadd(scale, Vectorized<float>(float_val0), scale_neg_zp_premul);
    auto val1 =
        vec::fmadd(scale, Vectorized<float>(float_val1), scale_neg_zp_premul);
    auto val2 =
        vec::fmadd(scale, Vectorized<float>(float_val2), scale_neg_zp_premul);
    auto val3 =
        vec::fmadd(scale, Vectorized<float>(float_val3), scale_neg_zp_premul);
    return {val0, val1, val2, val3};
  }

  // Dequantize without the premultiplied term: (float(q) - zero_point) * scale.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point) const {
    __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
    __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
    __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
    __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));

    __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0));
    __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1));
    __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2));
    __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3));

    auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
    auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
    auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
    auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
    return {val0, val1, val2, val3};
  }

  // Quantize 32 floats (4 vectors of 8 lanes) into int8 via the AVX2 helper;
  // note the helper takes the inverse scale, so `scale` itself is unused.
  static Vectorized<c10::qint8> quantize(
      const float_vec_return_type& rhs,
      float /*scale*/,
      int32_t zero_point,
      float inverse_scale) {
    auto* rhs_data = (float*)rhs.data();
    int8_t quantized_values[32];
    QuantizeAvx2<value_type>(
        rhs_data, quantized_values, 32, inverse_scale, zero_point);
    return Vectorized<c10::qint8>::loadu(quantized_values);
  }

  // Element-wise maximum on signed 8-bit lanes.
  Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
    return _mm256_max_epi8(vals, b.vals);
  }

  // Element-wise minimum on signed 8-bit lanes.
  Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
    return _mm256_min_epi8(vals, b.vals);
  }

  // Quantized ReLU: clamp each lane to at least the zero-point value.
  Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
    return maximum(zero_point);
  }

  // Quantized ReLU6: clamp each lane into [zero_point, q_six].
  Vectorized<c10::qint8> relu6(
      Vectorized<c10::qint8> zero_point,
      Vectorized<c10::qint8> q_six) {
    return _mm256_min_epi8(
        _mm256_max_epi8(vals, zero_point.vals), q_six.vals);
  }

  // Widen both operands to int32 (8 lanes per 64-bit chunk) and subtract
  // lane-wise, yielding four int32 vectors that cover all 32 int8 lanes.
  int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
    __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
    __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
    __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
    __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));

    __m256i int32_val0 = cvtepi8_epi32(int_val0);
    __m256i int32_val1 = cvtepi8_epi32(int_val1);
    __m256i int32_val2 = cvtepi8_epi32(int_val2);
    __m256i int32_val3 = cvtepi8_epi32(int_val3);

    __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0));
    __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1));
    __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2));
    __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3));

    __m256i int32_b0 = cvtepi8_epi32(int_b0);
    __m256i int32_b1 = cvtepi8_epi32(int_b1);
    __m256i int32_b2 = cvtepi8_epi32(int_b2);
    __m256i int32_b3 = cvtepi8_epi32(int_b3);

    __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0);
    __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1);
    __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2);
    __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3);

    return {Vectorized<c10::qint32>(res_0),
            Vectorized<c10::qint32>(res_1),
            Vectorized<c10::qint32>(res_2),
            Vectorized<c10::qint32>(res_3)};
  }

  // Requantize four int32 vectors back to int8:
  // saturate(round(int32 * multiplier) + zero_point).
  static Vectorized<c10::qint8> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    __m256 multiplier_v = _mm256_set1_ps(multiplier);
    __m256i zero_point_v = _mm256_set1_epi32(zero_point);
    return RequantizeAvx2<value_type>(inp, multiplier_v, zero_point_v);
  }

 private:
  // Load from memory constructor (unaligned 256-bit load); private so
  // callers go through the static loadu() entry points.
  Vectorized(const void* ptr) {
    vals = _mm256_loadu_si256((const __m256i*)ptr);
  }
};
|
| 642 |
+
|
| 643 |
+
template <>
|
| 644 |
+
Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
|
| 645 |
+
return a.maximum(b);
|
| 646 |
+
}
|
| 647 |
+
|
| 648 |
+
// AVX2 specialization holding 32 quantized unsigned 8-bit values in one
// 256-bit register. Mirrors Vectorized<c10::qint8> but uses the unsigned
// epu8 min/max and zero-extending widening conversions.
template<>
struct Vectorized<c10::quint8> : public Vectorizedqi {
  static constexpr int size() {
    return 32;
  }

  // Dequantizing 32 uint8 lanes produces 4 float vectors of 8 lanes each.
  static constexpr int float_num_vecs() {
    return 4;
  }

  // Widening 32 uint8 lanes produces 4 int32 vectors of 8 lanes each.
  static constexpr int int_num_vecs() {
    return 4;
  }

  using float_vec_return_type = std::array<Vectorized<float>, 4>;
  using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
  using value_type = typename c10::quint8::underlying;

 public:
  using Vectorizedqi::Vectorizedqi;
  Vectorized() {}

  Vectorized(__m256i vals_) { vals = vals_;}

  // Broadcast constructor: replicate one quantized value across all 32 lanes.
  Vectorized(const c10::quint8& val) {
    value_type uw = val.val_;
    vals = _mm256_set1_epi8(uw);
  }

  // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
  C10_CLANG_DIAGNOSTIC_PUSH()
  #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy")
  C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy")
  #endif
  Vectorized(const Vectorized<c10::quint8>& other) : Vectorizedqi(other.vals) { }
  C10_CLANG_DIAGNOSTIC_POP()

  // Store `count` lanes to unaligned memory: full 256-bit store when
  // count == size(), element-wise memcpy for partial stores.
  void store(void* ptr, int count = size()) const {
    if (count != size()) {
      memcpy(ptr, &vals, count * sizeof(value_type));
    } else {
      _mm256_storeu_si256((__m256i*)ptr, vals);
    }
  }

  static Vectorized<c10::quint8> loadu(const void* ptr) {
    return Vectorized<c10::quint8>(ptr);
  }

  // Partial load: copy `count` elements into a zeroed aligned buffer first so
  // the trailing lanes have a deterministic value.
  static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
    __at_align__ value_type tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(
        tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
    return _mm256_loadu_si256((const __m256i*)tmp_values);
  }

 private:
  // Zero-extend 8 uint8 lanes (low 64 bits of the input) to 8 int32 lanes.
  __m256i cvtepu8_epi32(__m128i epu8_vals) const {
    return _mm256_cvtepu8_epi32(epu8_vals);
  }

 public:
  // Dequantize using a precomputed term: scale * float(q) + scale_zp_premul,
  // done 8 lanes at a time over the four 64-bit chunks of the register.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> /*zero_point*/,
      Vectorized<float> scale_zp_premul) const {
    __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
    __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
    __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
    __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));

    __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0));
    __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1));
    __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2));
    __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3));

    auto val0 =
        vec::fmadd(scale, Vectorized<float>(float_val0), scale_zp_premul);
    auto val1 =
        vec::fmadd(scale, Vectorized<float>(float_val1), scale_zp_premul);
    auto val2 =
        vec::fmadd(scale, Vectorized<float>(float_val2), scale_zp_premul);
    auto val3 =
        vec::fmadd(scale, Vectorized<float>(float_val3), scale_zp_premul);
    return {val0, val1, val2, val3};
  }

  // Dequantize without the premultiplied term: (float(q) - zero_point) * scale.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point) const {
    __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
    __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
    __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
    __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));

    __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0));
    __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1));
    __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2));
    __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3));

    auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
    auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
    auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
    auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
    return {val0, val1, val2, val3};
  }

  // Quantize 32 floats (4 vectors of 8 lanes) into uint8 via the AVX2 helper;
  // note the helper takes the inverse scale, so `scale` itself is unused.
  static Vectorized<c10::quint8> quantize(
      const float_vec_return_type& rhs,
      float /*scale*/,
      int32_t zero_point,
      float inverse_scale) {
    auto* rhs_data = (float*)rhs.data();
    uint8_t quantized_values[32];
    QuantizeAvx2<value_type>(
        rhs_data, quantized_values, 32, inverse_scale, zero_point);
    return Vectorized<c10::quint8>::loadu(quantized_values);
  }

  // Element-wise maximum on unsigned 8-bit lanes.
  Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
    return _mm256_max_epu8(vals, b.vals);
  }

  // Element-wise minimum on unsigned 8-bit lanes.
  Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
    return _mm256_min_epu8(vals, b.vals);
  }

  // Quantized ReLU: clamp each lane to at least the zero-point value.
  Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
    return maximum(zero_point);
  }

  // Quantized ReLU6: clamp each lane into [zero_point, q_six].
  Vectorized<c10::quint8> relu6(
      Vectorized<c10::quint8> zero_point,
      Vectorized<c10::quint8> q_six) {
    return _mm256_min_epu8(
        _mm256_max_epu8(vals, zero_point.vals), q_six.vals);
  }

  // Widen both operands to int32 (8 lanes per 64-bit chunk) and subtract
  // lane-wise, yielding four int32 vectors that cover all 32 uint8 lanes.
  int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
    __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
    __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
    __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
    __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));

    __m256i int32_val0 = cvtepu8_epi32(int_val0);
    __m256i int32_val1 = cvtepu8_epi32(int_val1);
    __m256i int32_val2 = cvtepu8_epi32(int_val2);
    __m256i int32_val3 = cvtepu8_epi32(int_val3);

    __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0));
    __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1));
    __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2));
    __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3));

    __m256i int32_b0 = cvtepu8_epi32(int_b0);
    __m256i int32_b1 = cvtepu8_epi32(int_b1);
    __m256i int32_b2 = cvtepu8_epi32(int_b2);
    __m256i int32_b3 = cvtepu8_epi32(int_b3);

    __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0);
    __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1);
    __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2);
    __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3);
    return {Vectorized<c10::qint32>(res_0),
            Vectorized<c10::qint32>(res_1),
            Vectorized<c10::qint32>(res_2),
            Vectorized<c10::qint32>(res_3)};
  }

  // Requantize four int32 vectors back to uint8:
  // saturate(round(int32 * multiplier) + zero_point).
  static Vectorized<c10::quint8> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    __m256 multiplier_v = _mm256_set1_ps(multiplier);
    __m256i zero_point_v = _mm256_set1_epi32(zero_point);
    return RequantizeAvx2<value_type>(inp, multiplier_v, zero_point_v);
  }

 private:

  // Load from memory constructor (unaligned 256-bit load); private so
  // callers go through the static loadu() entry points.
  Vectorized(const void* ptr) {
    vals = _mm256_loadu_si256((const __m256i*)ptr);
  }
};
|
| 840 |
+
|
| 841 |
+
template <>
|
| 842 |
+
Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
|
| 843 |
+
return a.maximum(b);
|
| 844 |
+
}
|
| 845 |
+
|
| 846 |
+
#else
|
| 847 |
+
|
| 848 |
+
// NOTE: These are low-performance implementations that we fall back on
|
| 849 |
+
// if we are not building with AVX2. This may not be an issue, because
|
| 850 |
+
// currently for quantization we assume the user has at least AVX512
|
| 851 |
+
// installed, so these can simply act as a reference implementation.
|
| 852 |
+
//
|
| 853 |
+
// If in the future we relax this requirement (AVX2+), we should probably
|
| 854 |
+
// revisit these implementations
|
| 855 |
+
|
| 856 |
+
// Scalar fallback base for the quantized Vectorized<T> specializations when
// AVX2 is unavailable. Stores `size_` underlying values in a plain array and
// implements dequantize element by element.
template <
    typename T,
    typename float_vec_return_type_,
    typename int_vec_return_type_,
    int size_>
struct VectorizedQuantizedConverter {
  static constexpr int size() {
    return size_;
  }

  // One Vectorized<float> covers 8 lanes, so size()/8 float vectors are
  // produced by dequantize().
  static constexpr int float_num_vecs() {
    return size() / 8;
  }

  static constexpr int int_num_vecs() {
    return size() / 8;
  }

  using float_vec_return_type = float_vec_return_type_;
  using int_vec_return_type = int_vec_return_type_;

  using value_type = typename T::underlying;
  std::array<value_type, size_> vals;

  // Broadcast constructor: replicate one quantized value into every lane.
  VectorizedQuantizedConverter(T val) {
    for (const auto i : c10::irange(size())) {
      vals[i] = val.val_;
    }
  }

  // Load-from-memory constructor: copies size() underlying values.
  VectorizedQuantizedConverter(const void* ptr) {
    memcpy(vals.data(), ptr, sizeof(value_type) * size());
  }

  // Store the first `count` lanes to memory.
  void store(void* ptr, int count = size()) const {
    memcpy(ptr, vals.data(), count * sizeof(value_type));
  }

  // Element-wise dequantize via at::native::dequantize_val; the premul
  // argument is unused in this scalar path.
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point,
      Vectorized<float> /*scale_zp_premul*/) const {
    float_vec_return_type rv;
    for (const auto i : c10::irange(float_num_vecs())) {
      float tmp_vals[8];
      for (const auto j : c10::irange(8)) {
        tmp_vals[j] = at::native::dequantize_val<T>(
            scale[j], zero_point[j], T(vals[8 * i + j]));
      }
      rv[i] = Vectorized<float>(tmp_vals[0],
          tmp_vals[1],
          tmp_vals[2],
          tmp_vals[3],
          tmp_vals[4],
          tmp_vals[5],
          tmp_vals[6],
          tmp_vals[7]);
    }
    return rv;
  }

  // Two-argument overload; forwards to the three-argument form (whose third
  // parameter is ignored, so the default-constructed value is never read).
  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point) const {
    Vectorized<float> scale_zp_premul;
    return dequantize(scale, zero_point, scale_zp_premul);
  }

 protected:
  VectorizedQuantizedConverter() {}
};
|
| 927 |
+
|
| 928 |
+
// Scalar (non-AVX2) fallback specialization for 8 quantized int32 lanes.
template <>
struct Vectorized<c10::qint32> : public VectorizedQuantizedConverter<
                                     c10::qint32,
                                     std::array<Vectorized<float>, 1>,
                                     std::array<Vectorized<c10::qint32>, 1>,
                                     8> {
  Vectorized()
      : VectorizedQuantizedConverter<
            c10::qint32,
            std::array<Vectorized<float>, 1>,
            std::array<Vectorized<c10::qint32>, 1>,
            8>() {}
  Vectorized(c10::qint32 val)
      : VectorizedQuantizedConverter<
            c10::qint32,
            std::array<Vectorized<float>, 1>,
            std::array<Vectorized<c10::qint32>, 1>,
            8>(val) {}
  Vectorized(const void* ptr)
      : VectorizedQuantizedConverter<
            c10::qint32,
            std::array<Vectorized<float>, 1>,
            std::array<Vectorized<c10::qint32>, 1>,
            8>(ptr) {}

  static Vectorized<c10::qint32> loadu(const void* ptr) {
    return Vectorized<c10::qint32>(ptr);
  }

  // Partial load: copy `count` elements into a zeroed aligned buffer first so
  // the trailing lanes have a deterministic value.
  static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
    __at_align__ value_type tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(
        tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
    return Vectorized<c10::qint32>(tmp_values);
  }

  // Quantize 8 floats via the scalar reference routine; the inverse scale is
  // unused here because quantize_vec takes the scale directly.
  static Vectorized<c10::qint32> quantize(
      const float_vec_return_type& rhs,
      float scale,
      int32_t zero_point,
      float /*inverse_scale*/) {
    std::array<value_type, size()> qvals;
    std::array<float, float_num_vecs() * 8> float_vals;

    for (const auto i : c10::irange(float_num_vecs())) {
      rhs[i].store(&float_vals[i * 8], 8);
    }

    at::native::quantize_vec<c10::qint32, /*precision=*/32>(
        scale,
        zero_point,
        float_vals.data(),
        (c10::qint32*)qvals.data(),
        8 * float_num_vecs());

    return Vectorized<c10::qint32>::loadu(qvals.data());
  }

  // Element-wise maximum over all lanes.
  Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
    Vectorized<c10::qint32> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
    }
    return retval;
  }

  // Element-wise minimum over all lanes.
  Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
    Vectorized<c10::qint32> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
    }
    return retval;
  }

  // Quantized ReLU: clamp each lane to at least the zero-point value.
  Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
    return maximum(zero_point);
  }


  // Quantized ReLU6: clamp each lane into [zero_point, q_six].
  Vectorized<c10::qint32> relu6(
      Vectorized<c10::qint32> zero_point,
      Vectorized<c10::qint32> q_six) {
    Vectorized<c10::qint32> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::min<value_type>(
          std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
    }
    return retval;
  }

  // Lane-wise subtraction; lanes are already 32-bit so the single result
  // vector (int_num_vecs() == 1) holds all differences.
  int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
    int_vec_return_type retval;
    for (const auto i : c10::irange(size())) {
      retval[0].vals[i] = vals[i] - b.vals[i];
    }
    return retval;
  }

  // Requantize: nearbyint(int32 * multiplier) + zero_point per lane
  // (std::nearbyint uses the current rounding mode, nearest-even by default).
  static Vectorized<c10::qint32> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    Vectorized<c10::qint32> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] =
          std::nearbyint(static_cast<float>(inp[0].vals[i]) * multiplier) +
          zero_point;
    }
    return retval;
  }
};
|
| 1045 |
+
|
| 1046 |
+
template <>
|
| 1047 |
+
Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 1048 |
+
return a.maximum(b);
|
| 1049 |
+
}
|
| 1050 |
+
|
| 1051 |
+
template <>
|
| 1052 |
+
Vectorized<c10::qint32> inline operator*(
|
| 1053 |
+
const Vectorized<c10::qint32>& a,
|
| 1054 |
+
const Vectorized<c10::qint32>& b) {
|
| 1055 |
+
Vectorized<c10::qint32> retval;
|
| 1056 |
+
for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
|
| 1057 |
+
retval.vals[i] = a.vals[i] * b.vals[i];
|
| 1058 |
+
}
|
| 1059 |
+
return retval;
|
| 1060 |
+
}
|
| 1061 |
+
|
| 1062 |
+
template <>
|
| 1063 |
+
Vectorized<c10::qint32> inline operator+(
|
| 1064 |
+
const Vectorized<c10::qint32>& a,
|
| 1065 |
+
const Vectorized<c10::qint32>& b) {
|
| 1066 |
+
Vectorized<c10::qint32> retval;
|
| 1067 |
+
for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
|
| 1068 |
+
retval.vals[i] = a.vals[i] + b.vals[i];
|
| 1069 |
+
}
|
| 1070 |
+
return retval;
|
| 1071 |
+
}
|
| 1072 |
+
|
| 1073 |
+
template <>
|
| 1074 |
+
// Scalar-emulation fallback for a vector of 32 quantized signed 8-bit values.
// Dequantized form is 4 Vectorized<float> (8 lanes each written/read per
// vector below); the widened integer form is 4 Vectorized<c10::qint32>.
struct Vectorized<c10::qint8> : public VectorizedQuantizedConverter<
                                    c10::qint8,
                                    std::array<Vectorized<float>, 4>,
                                    std::array<Vectorized<c10::qint32>, 4>,
                                    32> {
  Vectorized()
      : VectorizedQuantizedConverter<
            c10::qint8,
            std::array<Vectorized<float>, 4>,
            std::array<Vectorized<c10::qint32>, 4>,
            32>() {}
  // Broadcast a single quantized value into all 32 lanes.
  Vectorized(c10::qint8 val)
      : VectorizedQuantizedConverter<
            c10::qint8,
            std::array<Vectorized<float>, 4>,
            std::array<Vectorized<c10::qint32>, 4>,
            32>(val) {}
  // Load 32 elements from (possibly unaligned) memory.
  Vectorized(const void* ptr)
      : VectorizedQuantizedConverter<
            c10::qint8,
            std::array<Vectorized<float>, 4>,
            std::array<Vectorized<c10::qint32>, 4>,
            32>(ptr) {}

  static Vectorized<c10::qint8> loadu(const void* ptr) {
    return Vectorized<c10::qint8>(ptr);
  }

  // Partial load: reads `count` elements; the remaining lanes are zeroed.
  static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
    __at_align__ value_type tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(
        tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
    return Vectorized<c10::qint8>(tmp_values);
  }

  // Quantize 4 float vectors into one qint8 vector via
  // at::native::quantize_vec (round + add zero_point + saturate).
  // `inverse_scale` is unused by this scalar fallback.
  static Vectorized<c10::qint8> quantize(
      const float_vec_return_type& rhs,
      float scale,
      int32_t zero_point,
      float /*inverse_scale*/) {
    std::array<value_type, size()> qvals;
    std::array<float, float_num_vecs() * 8> float_vals;

    for (const auto i : c10::irange(float_num_vecs())) {
      rhs[i].store(&float_vals[i * 8], 8);
    }

    at::native::quantize_vec<c10::qint8>(
        scale,
        zero_point,
        float_vals.data(),
        (c10::qint8*)qvals.data(),
        8 * float_num_vecs());

    return Vectorized<c10::qint8>::loadu(qvals.data());
  }

  // Lane-wise max in the quantized (integer) domain.
  Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
    Vectorized<c10::qint8> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
    }
    return retval;
  }

  // Lane-wise min in the quantized (integer) domain.
  Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
    Vectorized<c10::qint8> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
    }
    return retval;
  }

  // ReLU on quantized data: clamp from below at the quantized zero point.
  Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
    return maximum(zero_point);
  }

  // ReLU6 on quantized data: clamp to [zero_point, q_six] lane-wise,
  // where q_six is the quantized representation of 6.0.
  Vectorized<c10::qint8> relu6(
      Vectorized<c10::qint8> zero_point,
      Vectorized<c10::qint8> q_six) {
    Vectorized<c10::qint8> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::min<value_type>(
          std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
    }
    return retval;
  }

  // Subtract in a widened int32 domain so int8 differences cannot overflow.
  int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
    int_vec_return_type retval;
    constexpr int elem_per_int_vec = size() / int_num_vecs();
    for (const auto i : c10::irange(int_num_vecs())) {
      for (const auto j : c10::irange(elem_per_int_vec)) {
        retval[i].vals[j] =
            static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
            static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
      }
    }
    return retval;
  }
  // Requantize int32 intermediates: scale by `multiplier`, round with
  // std::nearbyint (current rounding mode, default round-to-nearest-even),
  // add zero_point, then saturate to the qint8 range.
  static Vectorized<c10::qint8> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    constexpr int elem_per_int_vec = size() / int_num_vecs();
    constexpr auto min_val = std::numeric_limits<value_type>::min();
    constexpr auto max_val = std::numeric_limits<value_type>::max();
    Vectorized<c10::qint8> retval;
    for (const auto i : c10::irange(int_num_vecs())) {
      for (const auto j : c10::irange(elem_per_int_vec)) {
        int32_t rounded =
            std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
            zero_point;
        retval.vals[i * elem_per_int_vec + j] =
            std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
      }
    }
    return retval;
  }
};
|
| 1200 |
+
|
| 1201 |
+
// Free-function overload of lane-wise maximum for qint8 vectors;
// forwards to the member implementation.
template <>
Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
  return a.maximum(b);
}
|
| 1205 |
+
|
| 1206 |
+
// Scalar-emulation fallback for a vector of 32 quantized unsigned 8-bit
// values. Mirrors Vectorized<c10::qint8> above with value_type = quint8.
template <>
struct Vectorized<c10::quint8> : public VectorizedQuantizedConverter<
                                     c10::quint8,
                                     std::array<Vectorized<float>, 4>,
                                     std::array<Vectorized<c10::qint32>, 4>,
                                     32> {
  Vectorized()
      : VectorizedQuantizedConverter<
            c10::quint8,
            std::array<Vectorized<float>, 4>,
            std::array<Vectorized<c10::qint32>, 4>,
            32>() {}
  // Broadcast a single quantized value into all 32 lanes.
  Vectorized(c10::quint8 val)
      : VectorizedQuantizedConverter<
            c10::quint8,
            std::array<Vectorized<float>, 4>,
            std::array<Vectorized<c10::qint32>, 4>,
            32>(val) {}
  // Load 32 elements from (possibly unaligned) memory.
  Vectorized(const void* ptr)
      : VectorizedQuantizedConverter<
            c10::quint8,
            std::array<Vectorized<float>, 4>,
            std::array<Vectorized<c10::qint32>, 4>,
            32>(ptr) {}

  static Vectorized<c10::quint8> loadu(const void* ptr) {
    return Vectorized<c10::quint8>(ptr);
  }

  // Partial load: reads `count` elements; the remaining lanes are zeroed.
  static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
    __at_align__ value_type tmp_values[size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(size())) {
      tmp_values[i] = 0;
    }
    std::memcpy(
        tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
    return Vectorized<c10::quint8>(tmp_values);
  }

  // Quantize 4 float vectors into one quint8 vector via
  // at::native::quantize_vec (round + add zero_point + saturate).
  // `inverse_scale` is unused by this scalar fallback.
  static Vectorized<c10::quint8> quantize(
      const float_vec_return_type& rhs,
      float scale,
      int32_t zero_point,
      float /*inverse_scale*/) {
    std::array<value_type, size()> qvals;
    std::array<float, float_num_vecs() * 8> float_vals;

    for (const auto i : c10::irange(float_num_vecs())) {
      rhs[i].store(&float_vals[i * 8], 8);
    }

    at::native::quantize_vec<c10::quint8>(
        scale,
        zero_point,
        float_vals.data(),
        (c10::quint8*)qvals.data(),
        8 * float_num_vecs());

    return Vectorized<c10::quint8>::loadu(qvals.data());
  }

  // Lane-wise max in the quantized (integer) domain.
  Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
    Vectorized<c10::quint8> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
    }
    return retval;
  }

  // Lane-wise min in the quantized (integer) domain.
  Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
    Vectorized<c10::quint8> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
    }
    return retval;
  }

  // ReLU on quantized data: clamp from below at the quantized zero point.
  Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
    return maximum(zero_point);
  }


  // ReLU6 on quantized data: clamp to [zero_point, q_six] lane-wise,
  // where q_six is the quantized representation of 6.0.
  Vectorized<c10::quint8> relu6(
      Vectorized<c10::quint8> zero_point,
      Vectorized<c10::quint8> q_six) {
    Vectorized<c10::quint8> retval;
    for (const auto i : c10::irange(size())) {
      retval.vals[i] = std::min<value_type>(
          std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
    }
    return retval;
  }

  // Subtract in a widened int32 domain so uint8 differences cannot wrap.
  int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
    int_vec_return_type retval;
    constexpr int elem_per_int_vec = size() / int_num_vecs();
    for (const auto i : c10::irange(int_num_vecs())) {
      for (const auto j : c10::irange(elem_per_int_vec)) {
        retval[i].vals[j] =
            static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
            static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
      }
    }
    return retval;
  }
  // Requantize int32 intermediates: scale by `multiplier`, round with
  // std::nearbyint (current rounding mode, default round-to-nearest-even),
  // add zero_point, then saturate to the quint8 range.
  static Vectorized<c10::quint8> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    constexpr int elem_per_int_vec = size() / int_num_vecs();
    constexpr auto min_val = std::numeric_limits<value_type>::min();
    constexpr auto max_val = std::numeric_limits<value_type>::max();
    Vectorized<c10::quint8> retval;
    for (const auto i : c10::irange(int_num_vecs())) {
      for (const auto j : c10::irange(elem_per_int_vec)) {
        int32_t rounded =
            std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
            zero_point;
        retval.vals[i * elem_per_int_vec + j] =
            std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
      }
    }
    return retval;
  }
};
|
| 1334 |
+
|
| 1335 |
+
// Free-function overload of lane-wise maximum for quint8 vectors;
// forwards to the member implementation.
template <>
Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
  return a.maximum(b);
}
|
| 1339 |
+
|
| 1340 |
+
#endif // if defined(CPU_CAPABILITY_AVX2)
|
| 1341 |
+
}} // namespace at::vec::CPU_CAPABILITY
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
| 6 |
+
|
| 7 |
+
// Note: header order is important here
|
| 8 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h>
|
| 10 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h>
|
| 11 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h>
|
| 12 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h>
|
| 13 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h>
|
| 14 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h>
|
| 15 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h>
|
| 16 |
+
|
| 17 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h>
|
| 18 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h>
|
| 19 |
+
|
| 20 |
+
#include <ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
namespace vec {
|
| 24 |
+
|
| 25 |
+
inline namespace CPU_CAPABILITY {
|
| 26 |
+
|
| 27 |
+
// Instantiate clamp/clamp_min/clamp_max free functions (macro declared in
// vsx_helpers.h) for every element type with a VSX Vectorized specialization.
DEFINE_CLAMP_FUNCS(c10::quint8)
DEFINE_CLAMP_FUNCS(c10::qint8)
DEFINE_CLAMP_FUNCS(c10::qint32)
DEFINE_CLAMP_FUNCS(int16_t)
DEFINE_CLAMP_FUNCS(int32_t)
DEFINE_CLAMP_FUNCS(int64_t)
DEFINE_CLAMP_FUNCS(float)
DEFINE_CLAMP_FUNCS(double)
|
| 35 |
+
|
| 36 |
+
template <>
|
| 37 |
+
Vectorized<double> C10_ALWAYS_INLINE fmadd(
|
| 38 |
+
const Vectorized<double>& a,
|
| 39 |
+
const Vectorized<double>& b,
|
| 40 |
+
const Vectorized<double>& c) {
|
| 41 |
+
return Vectorized<double>{
|
| 42 |
+
vec_madd(a.vec0(), b.vec0(), c.vec0()),
|
| 43 |
+
vec_madd(a.vec1(), b.vec1(), c.vec1())};
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
template <>
|
| 47 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE fmadd(
|
| 48 |
+
const Vectorized<int64_t>& a,
|
| 49 |
+
const Vectorized<int64_t>& b,
|
| 50 |
+
const Vectorized<int64_t>& c) {
|
| 51 |
+
return Vectorized<int64_t>{
|
| 52 |
+
a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
|
| 53 |
+
}
|
| 54 |
+
template <>
|
| 55 |
+
Vectorized<int32_t> C10_ALWAYS_INLINE fmadd(
|
| 56 |
+
const Vectorized<int32_t>& a,
|
| 57 |
+
const Vectorized<int32_t>& b,
|
| 58 |
+
const Vectorized<int32_t>& c) {
|
| 59 |
+
return Vectorized<int32_t>{
|
| 60 |
+
a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
|
| 61 |
+
}
|
| 62 |
+
template <>
|
| 63 |
+
Vectorized<int16_t> C10_ALWAYS_INLINE fmadd(
|
| 64 |
+
const Vectorized<int16_t>& a,
|
| 65 |
+
const Vectorized<int16_t>& b,
|
| 66 |
+
const Vectorized<int16_t>& c) {
|
| 67 |
+
return Vectorized<int16_t>{
|
| 68 |
+
a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
// Instantiate bit-pattern-preserving cast<> overloads (macro declared in
// vsx_helpers.h) between each listed type and every other vector type.
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float)
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double)
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t)
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t)
DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t)
|
| 76 |
+
|
| 77 |
+
// Convert each double lane to int64_t (vec_signed truncates toward zero),
// preserving lane positions across both 128-bit halves.
template <>
Vectorized<int64_t> C10_ALWAYS_INLINE
convert_to_int_of_same_size<double>(const Vectorized<double>& src) {
  return Vectorized<int64_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
}
|
| 82 |
+
|
| 83 |
+
// Convert each float lane to int32_t (vec_signed truncates toward zero),
// preserving lane positions across both 128-bit halves.
template <>
Vectorized<int32_t> C10_ALWAYS_INLINE
convert_to_int_of_same_size<float>(
    const Vectorized<float>& src) {
  return Vectorized<int32_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
}
|
| 89 |
+
|
| 90 |
+
// Convert n int32 values to float. Processes 8 elements per iteration as two
// 16-byte VSX loads/stores (offset0 / offset16), then finishes the tail
// (fewer than one full vector) with a scalar loop.
template <>
inline void convert(const int32_t* src, float* dst, int64_t n) {
  // int32_t and float have same size
  int64_t i;
  for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
    const int32_t* src_a = src + i;
    float* dst_a = dst + i;
    vint32 input_vec0 = vec_vsx_ld(offset0, reinterpret_cast<const vint32*>(src_a));
    vint32 input_vec1 =
        vec_vsx_ld(offset16, reinterpret_cast<const vint32*>(src_a));
    vfloat32 c0 = vec_float(input_vec0);
    vfloat32 c1 = vec_float(input_vec1);
    vec_vsx_st(c0, offset0, dst_a);
    vec_vsx_st(c1, offset16, dst_a);
  }

  // Scalar tail for the remaining n % size() elements.
  for (; i < n; i++) {
    dst[i] = static_cast<float>(src[i]);
  }
}
|
| 110 |
+
|
| 111 |
+
// Convert n int64 values to double. Processes 4 elements per iteration as two
// 16-byte VSX loads/stores (offset0 / offset16), then finishes the tail with
// a scalar loop.
template <>
inline void convert(const int64_t* src, double* dst, int64_t n) {
  int64_t i;
  for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
    const int64_t* src_a = src + i;
    double* dst_a = dst + i;
    vint64 input_vec0 =
        vec_vsx_ld(offset0, reinterpret_cast<const vint64*>(src_a));
    vint64 input_vec1 =
        vec_vsx_ld(offset16, reinterpret_cast<const vint64*>(src_a));
    vfloat64 c0 = vec_double(input_vec0);
    vfloat64 c1 = vec_double(input_vec1);
    vec_vsx_st(c0, offset0, reinterpret_cast<double*>(dst_a));
    vec_vsx_st(c1, offset16, reinterpret_cast<double*>(dst_a));
  }
  // Scalar tail for the remaining n % size() elements.
  for (; i < n; i++) {
    dst[i] = static_cast<double>(src[i]);
  }
}
|
| 130 |
+
//Generic implementation to fix compiler error
//TO-DO : Add optimized version for ppc64
// Widen one Half vector into two float vectors by round-tripping through
// aligned scratch buffers and the element-wise convert() helper.
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float(
    const Vectorized<Half>& a) {
  constexpr int64_t K = Vectorized<Half>::size();
  __at_align__ float arr[K];
  __at_align__ Half arr2[K];
  a.store(arr2);
  convert(arr2, arr, K);
  // First float vector takes the low half of the lanes, second the high half.
  return std::make_tuple(
      Vectorized<float>::loadu(arr),
      Vectorized<float>::loadu(arr + Vectorized<float>::size()));
}
|
| 143 |
+
|
| 144 |
+
// Narrow two float vectors into one Half vector: store both inputs into an
// aligned float scratch buffer (a = low lanes, b = high lanes), convert
// element-wise, and reload as a Half vector.
// Fix: dropped the stray ';' after the closing brace — an empty declaration
// at namespace scope that triggers -Wextra-semi warnings.
inline Vectorized<Half> convert_float_half(
    const Vectorized<float>& a, const Vectorized<float>& b) {
  constexpr int64_t K = Vectorized<Half>::size();
  __at_align__ float arr[K];
  __at_align__ Half arr2[K];
  a.store(arr);
  b.store(arr + Vectorized<float>::size());
  convert(arr, arr2, K);
  return Vectorized<Half>::loadu(arr2);
}
|
| 154 |
+
|
| 155 |
+
// Interleave two double vectors element-by-element using vec_xxpermdi
// (permute-doubleword-immediate; selector 0 = both high elements,
// 3 = both low elements).
template <>
std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(
    const Vectorized<double>& a,
    const Vectorized<double>& b) {
  // inputs:
  //   a      = {a0, a1, a2, a3}
  //   b      = {b0, b1, b2, b3}

  vfloat64 ab00 = vec_xxpermdi(a.vec0(), b.vec0(), 0);
  vfloat64 ab11 = vec_xxpermdi(a.vec0(), b.vec0(), 3);
  vfloat64 ab2_00 = vec_xxpermdi(a.vec1(), b.vec1(), 0);
  vfloat64 ab2_11 = vec_xxpermdi(a.vec1(), b.vec1(), 3);
  //   return {a0, b0, a1, b1}
  //          {a2, b2, a3, b3}
  return std::make_pair(
      Vectorized<double>{ab00, ab11}, Vectorized<double>{ab2_00, ab2_11});
}
|
| 172 |
+
|
| 173 |
+
// Inverse of interleave2<double>: separate element-interleaved input back
// into the two original double vectors via vec_xxpermdi.
template <>
std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(
    const Vectorized<double>& a,
    const Vectorized<double>& b) {
  // inputs:
  //   a = {a0, b0, a1, b1}
  //   b = {a2, b2, a3, b3}
  vfloat64 aa01 = vec_xxpermdi(a.vec0(), a.vec1(), 0);
  vfloat64 aa23 = vec_xxpermdi(b.vec0(), b.vec1(), 0);

  vfloat64 bb_01 = vec_xxpermdi(a.vec0(), a.vec1(), 3);
  vfloat64 bb_23 = vec_xxpermdi(b.vec0(), b.vec1(), 3);

  // swap lanes:
  //   return {a0, a1, a2, a3}
  //          {b0, b1, b2, b3}
  return std::make_pair(
      Vectorized<double>{aa01, aa23}, Vectorized<double>{bb_01, bb_23});
}
|
| 192 |
+
|
| 193 |
+
// Interleave two float vectors element-by-element using vec_mergeh/vec_mergel
// (merge the high / low halves of two 128-bit registers alternately).
template <>
std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(
    const Vectorized<float>& a,
    const Vectorized<float>& b) {
  // inputs:
  //   a = {a0, a1, a2, a3,, a4, a5, a6, a7}
  //   b = {b0, b1, b2, b3,, b4, b5, b6, b7}

  vfloat32 ab0011 = vec_mergeh(a.vec0(), b.vec0());
  vfloat32 ab2233 = vec_mergel(a.vec0(), b.vec0());

  vfloat32 ab2_0011 = vec_mergeh(a.vec1(), b.vec1());
  vfloat32 ab2_2233 = vec_mergel(a.vec1(), b.vec1());
  // group cols crossing lanes:
  //   return {a0, b0, a1, b1,, a2, b2, a3, b3}
  //          {a4, b4, a5, b5,, a6, b6, a7, b7}

  return std::make_pair(
      Vectorized<float>{ab0011, ab2233}, Vectorized<float>{ab2_0011, ab2_2233});
}
|
| 213 |
+
|
| 214 |
+
// Inverse of interleave2<float>: separate element-interleaved input back into
// the two original float vectors with two rounds of vec_mergeh/vec_mergel.
template <>
std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(
    const Vectorized<float>& a,
    const Vectorized<float>& b) {
  // inputs:
  //   a = {a0, b0, a1, b1,, a2, b2, a3, b3}
  //   b = {a4, b4, a5, b5,, a6, b6, a7, b7}

  // {a0,a2,b0,b2} {a1,a3,b1,b3}
  vfloat32 a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1());
  vfloat32 a1a3b1b3 = vec_mergel(a.vec0(), a.vec1());

  vfloat32 aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3);
  vfloat32 bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3);

  // Same two-round shuffle for the upper half of the input lanes.
  vfloat32 a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1());
  vfloat32 a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1());

  vfloat32 aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2);
  vfloat32 bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2);

  // it could be done with vec_perm ,too
  // swap lanes:
  //   return {a0, a1, a2, a3,, a4, a5, a6, a7}
  //          {b0, b1, b2, b3,, b4, b5, b6, b7}

  return std::make_pair(
      Vectorized<float>{aa0123, aa0123_2}, Vectorized<float>{bb0123, bb0123_2});
}
|
| 243 |
+
|
| 244 |
+
} // namespace
|
| 245 |
+
} // namespace vec
|
| 246 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h
ADDED
|
@@ -0,0 +1,584 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 3 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
| 5 |
+
#include <c10/util/complex.h>
|
| 6 |
+
#include <c10/util/irange.h>
|
| 7 |
+
|
| 8 |
+
namespace at {
|
| 9 |
+
namespace vec {
|
| 10 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 11 |
+
inline namespace CPU_CAPABILITY {
|
| 12 |
+
using ComplexDbl = c10::complex<double>;
|
| 13 |
+
|
| 14 |
+
template <>
|
| 15 |
+
class Vectorized<ComplexDbl> {
|
| 16 |
+
union {
|
| 17 |
+
struct {
|
| 18 |
+
vfloat64 _vec0;
|
| 19 |
+
vfloat64 _vec1;
|
| 20 |
+
};
|
| 21 |
+
struct {
|
| 22 |
+
vbool64 _vecb0;
|
| 23 |
+
vbool64 _vecb1;
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} __attribute__((__may_alias__));
|
| 27 |
+
|
| 28 |
+
public:
|
| 29 |
+
using value_type = ComplexDbl;
|
| 30 |
+
using vec_internal_type = vfloat64;
|
| 31 |
+
using vec_internal_mask_type = vbool64;
|
| 32 |
+
using size_type = int;
|
| 33 |
+
static constexpr size_type size() {
|
| 34 |
+
return 2;
|
| 35 |
+
}
|
| 36 |
+
Vectorized() {}
|
| 37 |
+
C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {}
|
| 38 |
+
C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
|
| 39 |
+
C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {}
|
| 40 |
+
C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
|
| 41 |
+
|
| 42 |
+
Vectorized(ComplexDbl val) {
|
| 43 |
+
double real_value = val.real();
|
| 44 |
+
double imag_value = val.imag();
|
| 45 |
+
_vec0 = vfloat64{real_value, imag_value};
|
| 46 |
+
_vec1 = vfloat64{real_value, imag_value};
|
| 47 |
+
}
|
| 48 |
+
Vectorized(ComplexDbl val1, ComplexDbl val2) {
|
| 49 |
+
_vec0 = vfloat64{val1.real(), val1.imag()};
|
| 50 |
+
_vec1 = vfloat64{val2.real(), val2.imag()};
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
|
| 54 |
+
return _vec0;
|
| 55 |
+
}
|
| 56 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
|
| 57 |
+
return _vec1;
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
template <int64_t mask>
|
| 61 |
+
static std::enable_if_t<blendChoiceComplexDbl(mask) == 0, Vectorized<ComplexDbl>>
|
| 62 |
+
C10_ALWAYS_INLINE
|
| 63 |
+
blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 64 |
+
return a;
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
template <int64_t mask>
|
| 68 |
+
static std::enable_if_t<blendChoiceComplexDbl(mask) == 1, Vectorized<ComplexDbl>>
|
| 69 |
+
C10_ALWAYS_INLINE
|
| 70 |
+
blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 71 |
+
return b;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
template <int64_t mask>
|
| 75 |
+
static std::enable_if_t<blendChoiceComplexDbl(mask) == 2, Vectorized<ComplexDbl>>
|
| 76 |
+
C10_ALWAYS_INLINE
|
| 77 |
+
blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 78 |
+
return {b._vec0, a._vec1};
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
template <int64_t mask>
|
| 82 |
+
static std::enable_if_t<blendChoiceComplexDbl(mask) == 3, Vectorized<ComplexDbl>>
|
| 83 |
+
C10_ALWAYS_INLINE
|
| 84 |
+
blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 85 |
+
return {a._vec0, b._vec1};
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
template <int64_t mask>
|
| 89 |
+
static Vectorized<ComplexDbl> C10_ALWAYS_INLINE
|
| 90 |
+
el_blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 91 |
+
const vbool64 mask_1st = VsxDblMask1(mask);
|
| 92 |
+
const vbool64 mask_2nd = VsxDblMask2(mask);
|
| 93 |
+
return {
|
| 94 |
+
(vfloat64)vec_sel(a._vec0, b._vec0, mask_1st),
|
| 95 |
+
(vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd)};
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
static Vectorized<ComplexDbl> blendv(
|
| 99 |
+
const Vectorized<ComplexDbl>& a,
|
| 100 |
+
const Vectorized<ComplexDbl>& b,
|
| 101 |
+
const Vectorized<ComplexDbl>& mask) {
|
| 102 |
+
// convert std::complex<V> index mask to V index mask: xy -> xxyy
|
| 103 |
+
auto mask_complex =
|
| 104 |
+
Vectorized<ComplexDbl>(vec_splat(mask._vec0, 0), vec_splat(mask._vec1, 0));
|
| 105 |
+
return {
|
| 106 |
+
vec_sel(a._vec0, b._vec0, mask_complex._vecb0),
|
| 107 |
+
vec_sel(a._vec1, b._vec1, mask_complex._vecb1)};
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
static Vectorized<ComplexDbl> C10_ALWAYS_INLINE elwise_blendv(
|
| 111 |
+
const Vectorized<ComplexDbl>& a,
|
| 112 |
+
const Vectorized<ComplexDbl>& b,
|
| 113 |
+
const Vectorized<ComplexDbl>& mask) {
|
| 114 |
+
return {
|
| 115 |
+
vec_sel(a._vec0, b._vec0, mask._vecb0),
|
| 116 |
+
vec_sel(a._vec1, b._vec1, mask._vecb1)};
|
| 117 |
+
}
|
| 118 |
+
template <typename step_t>
|
| 119 |
+
static Vectorized<ComplexDbl> arange(
|
| 120 |
+
ComplexDbl base = 0.,
|
| 121 |
+
step_t step = static_cast<step_t>(1)) {
|
| 122 |
+
return Vectorized<ComplexDbl>(base, base + step);
|
| 123 |
+
}
|
| 124 |
+
static Vectorized<ComplexDbl> set(
|
| 125 |
+
const Vectorized<ComplexDbl>& a,
|
| 126 |
+
const Vectorized<ComplexDbl>& b,
|
| 127 |
+
int64_t count = size()) {
|
| 128 |
+
switch (count) {
|
| 129 |
+
case 0:
|
| 130 |
+
return a;
|
| 131 |
+
case 1:
|
| 132 |
+
return blend<1>(a, b);
|
| 133 |
+
}
|
| 134 |
+
return b;
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
static Vectorized<value_type> C10_ALWAYS_INLINE
|
| 138 |
+
loadu(const void* ptr, int count = size()) {
|
| 139 |
+
if (count == size()) {
|
| 140 |
+
return {
|
| 141 |
+
vec_vsx_ld(offset0, reinterpret_cast<const double*>(ptr)),
|
| 142 |
+
vec_vsx_ld(offset16, reinterpret_cast<const double*>(ptr))};
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
__at_align__ value_type tmp_values[size()] = {};
|
| 146 |
+
std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
|
| 147 |
+
|
| 148 |
+
return {
|
| 149 |
+
vec_vsx_ld(offset0, reinterpret_cast<const double*>(tmp_values)),
|
| 150 |
+
vec_vsx_ld(offset16, reinterpret_cast<const double*>(tmp_values))};
|
| 151 |
+
}
|
| 152 |
+
void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
|
| 153 |
+
if (count == size()) {
|
| 154 |
+
vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(ptr));
|
| 155 |
+
vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(ptr));
|
| 156 |
+
} else if (count > 0) {
|
| 157 |
+
__at_align__ value_type tmp_values[size()];
|
| 158 |
+
vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(tmp_values));
|
| 159 |
+
vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(tmp_values));
|
| 160 |
+
std::memcpy(
|
| 161 |
+
ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
|
| 162 |
+
}
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
const ComplexDbl& operator[](int idx) const = delete;
|
| 166 |
+
ComplexDbl& operator[](int idx) = delete;
|
| 167 |
+
|
| 168 |
+
Vectorized<ComplexDbl> map(ComplexDbl (*const f)(ComplexDbl)) const {
  // Scalar fallback: spill to memory, apply f lane-by-lane, reload.
  __at_align__ ComplexDbl buffer[size()];
  store(buffer);
  for (const auto lane : c10::irange(size())) {
    buffer[lane] = f(buffer[lane]);
  }
  return loadu(buffer);
}
|
| 176 |
+
|
| 177 |
+
Vectorized<ComplexDbl> map(ComplexDbl (*const f)(const ComplexDbl&)) const {
  // Same scalar fallback as above, for functions taking const reference.
  __at_align__ ComplexDbl buffer[size()];
  store(buffer);
  for (const auto lane : c10::irange(size())) {
    buffer[lane] = f(buffer[lane]);
  }
  return loadu(buffer);
}
|
| 185 |
+
|
| 186 |
+
Vectorized<ComplexDbl> el_swapped() const {
|
| 187 |
+
vfloat64 v0 = vec_xxpermdi(_vec0, _vec0, 2);
|
| 188 |
+
vfloat64 v1 = vec_xxpermdi(_vec1, _vec1, 2);
|
| 189 |
+
return {v0, v1};
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
Vectorized<ComplexDbl> el_madd(
|
| 193 |
+
const Vectorized<ComplexDbl>& multiplier,
|
| 194 |
+
const Vectorized<ComplexDbl>& val) const {
|
| 195 |
+
return {
|
| 196 |
+
vec_madd(_vec0, multiplier._vec0, val._vec0),
|
| 197 |
+
vec_madd(_vec1, multiplier._vec1, val._vec1)};
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
Vectorized<ComplexDbl> el_mergeo() const {
|
| 201 |
+
vfloat64 v0 = vec_splat(_vec0, 1);
|
| 202 |
+
vfloat64 v1 = vec_splat(_vec1, 1);
|
| 203 |
+
return {v0, v1};
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
Vectorized<ComplexDbl> el_mergee() const {
|
| 207 |
+
vfloat64 v0 = vec_splat(_vec0, 0);
|
| 208 |
+
vfloat64 v1 = vec_splat(_vec1, 0);
|
| 209 |
+
return {v0, v1};
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
static Vectorized<ComplexDbl> el_mergee(
|
| 213 |
+
Vectorized<ComplexDbl>& first,
|
| 214 |
+
Vectorized<ComplexDbl>& second) {
|
| 215 |
+
return {
|
| 216 |
+
vec_mergeh(first._vec0, second._vec0),
|
| 217 |
+
vec_mergeh(first._vec1, second._vec1)};
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
static Vectorized<ComplexDbl> el_mergeo(
|
| 221 |
+
Vectorized<ComplexDbl>& first,
|
| 222 |
+
Vectorized<ComplexDbl>& second) {
|
| 223 |
+
return {
|
| 224 |
+
vec_mergel(first._vec0, second._vec0),
|
| 225 |
+
vec_mergel(first._vec1, second._vec1)};
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
Vectorized<ComplexDbl> abs_2_() const {
|
| 229 |
+
auto a = (*this).elwise_mult(*this);
|
| 230 |
+
auto permuted = a.el_swapped();
|
| 231 |
+
a = a + permuted;
|
| 232 |
+
return a;
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
Vectorized<ComplexDbl> abs_() const {
|
| 236 |
+
auto vi = el_mergeo();
|
| 237 |
+
auto vr = el_mergee();
|
| 238 |
+
return {Sleef_hypotd2_u05vsx(vr._vec0, vi._vec0), Sleef_hypotd2_u05vsx(vr._vec1, vi._vec1)};
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
Vectorized<ComplexDbl> abs() const {
|
| 242 |
+
return abs_() & vd_real_mask;
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
Vectorized<ComplexDbl> angle_() const {
  // angle = atan2(imag, real), written into the real lane of each element.
  // Zero-initialize the result so the imaginary lanes are defined (the
  // original default-constructed `ret` left them indeterminate; callers
  // like angle() mask them off, but returning uninitialized data is UB-prone).
  auto ret = Vectorized<ComplexDbl>(vd_zero);
  ret._vec0[0] = std::atan2(_vec0[1], _vec0[0]);
  ret._vec1[0] = std::atan2(_vec1[1], _vec1[0]);
  return ret;
}
|
| 254 |
+
|
| 255 |
+
Vectorized<ComplexDbl> angle() const {
|
| 256 |
+
return angle_() & vd_real_mask;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
Vectorized<ComplexDbl> real_() const {
|
| 260 |
+
return *this & vd_real_mask;
|
| 261 |
+
}
|
| 262 |
+
Vectorized<ComplexDbl> real() const {
|
| 263 |
+
return *this & vd_real_mask;
|
| 264 |
+
}
|
| 265 |
+
Vectorized<ComplexDbl> imag_() const {
|
| 266 |
+
return *this & vd_imag_mask;
|
| 267 |
+
}
|
| 268 |
+
Vectorized<ComplexDbl> imag() const {
|
| 269 |
+
return imag_().el_swapped();
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
Vectorized<ComplexDbl> conj_() const {
|
| 273 |
+
return *this ^ vd_isign_mask;
|
| 274 |
+
}
|
| 275 |
+
Vectorized<ComplexDbl> conj() const {
|
| 276 |
+
return *this ^ vd_isign_mask;
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
Vectorized<ComplexDbl> log() const {
|
| 280 |
+
// Most trigonomic ops use the log() op to improve complex number
|
| 281 |
+
// performance.
|
| 282 |
+
return map(std::log);
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
Vectorized<ComplexDbl> log2() const {
|
| 286 |
+
// log2eB_inv
|
| 287 |
+
auto ret = log();
|
| 288 |
+
return ret.elwise_mult(vd_log2e_inv);
|
| 289 |
+
}
|
| 290 |
+
Vectorized<ComplexDbl> log10() const {
|
| 291 |
+
auto ret = log();
|
| 292 |
+
return ret.elwise_mult(vd_log10e_inv);
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
Vectorized<ComplexDbl> log1p() const {
|
| 296 |
+
return map(std::log1p);
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
Vectorized<ComplexDbl> asin() const {
  // asin(z) = -i * ln(i*z + sqrt(1 - z^2))
  // With z = a + bi:
  //   = -i * ln((-b + ai) + sqrt(1 - (a^2 - b^2) - 2ab*i))
  auto cnj = conj_();                          // a, -b
  auto cnj_swapped = cnj.el_swapped();         // -b, a
  auto prod = cnj.elwise_mult(cnj_swapped);    // -ab per lane
  auto two_ab = prod + prod;                   // imaginary part: -2ab
  auto sq = (*this).elwise_mult(*this);        // a^2, b^2
  auto sq_swapped = sq.el_swapped();           // b^2, a^2
  auto re_part = horizontal_sub(sq, sq_swapped);   // a^2 - b^2
  re_part = Vectorized<ComplexDbl>(vd_one) - re_part;  // 1 - (a^2 - b^2)
  auto root = el_blend<0x0A>(re_part, two_ab).sqrt();  // sqrt(1 - z^2)
  auto ln = (cnj_swapped + root).log();        // ln(i*z + sqrt(1 - z^2))
  // Multiply by -i: swap components, then conjugate.
  return ln.el_swapped().conj();
}
|
| 316 |
+
|
| 317 |
+
Vectorized<ComplexDbl> acos() const {
|
| 318 |
+
// acos(x) = pi/2 - asin(x)
|
| 319 |
+
return Vectorized(vd_pi_2) - asin();
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
Vectorized<ComplexDbl> atan() const {
|
| 323 |
+
// atan(x) = i/2 * ln((i + z)/(i - z))
|
| 324 |
+
auto ione = Vectorized(vd_imag_one);
|
| 325 |
+
auto sum = ione + *this;
|
| 326 |
+
auto sub = ione - *this;
|
| 327 |
+
auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
|
| 328 |
+
return ln * vd_imag_half; // i/2*ln()
|
| 329 |
+
}
|
| 330 |
+
Vectorized<ComplexDbl> atanh() const {
|
| 331 |
+
return map(std::atanh);
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
Vectorized<ComplexDbl> sin() const {
|
| 335 |
+
return map(std::sin);
|
| 336 |
+
}
|
| 337 |
+
Vectorized<ComplexDbl> sinh() const {
|
| 338 |
+
return map(std::sinh);
|
| 339 |
+
}
|
| 340 |
+
Vectorized<ComplexDbl> cos() const {
|
| 341 |
+
return map(std::cos);
|
| 342 |
+
}
|
| 343 |
+
Vectorized<ComplexDbl> cosh() const {
|
| 344 |
+
return map(std::cosh);
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
Vectorized<ComplexDbl> tan() const {
|
| 348 |
+
return map(std::tan);
|
| 349 |
+
}
|
| 350 |
+
Vectorized<ComplexDbl> tanh() const {
|
| 351 |
+
return map(std::tanh);
|
| 352 |
+
}
|
| 353 |
+
Vectorized<ComplexDbl> ceil() const {
|
| 354 |
+
return {vec_ceil(_vec0), vec_ceil(_vec1)};
|
| 355 |
+
}
|
| 356 |
+
Vectorized<ComplexDbl> floor() const {
|
| 357 |
+
return {vec_floor(_vec0), vec_floor(_vec1)};
|
| 358 |
+
}
|
| 359 |
+
Vectorized<ComplexDbl> neg() const {
|
| 360 |
+
auto z = Vectorized<ComplexDbl>(vd_zero);
|
| 361 |
+
return z - *this;
|
| 362 |
+
}
|
| 363 |
+
Vectorized<ComplexDbl> round() const {
|
| 364 |
+
return {vec_rint(_vec0), vec_rint(_vec1)};
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
Vectorized<ComplexDbl> trunc() const {
|
| 368 |
+
return {vec_trunc(_vec0), vec_trunc(_vec1)};
|
| 369 |
+
}
|
| 370 |
+
|
| 371 |
+
Vectorized<ComplexDbl> elwise_sqrt() const {
|
| 372 |
+
return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
Vectorized<ComplexDbl> sqrt() const {
|
| 376 |
+
return map(std::sqrt);
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
Vectorized<ComplexDbl> reciprocal() const {
  // 1 / (c + di) = (c - di) / (c^2 + d^2)
  // i.e. conjugate divided element-wise by the squared magnitude.
  auto conjugate = *this ^ vd_isign_mask;  // c, -d
  auto magnitude_sq = abs_2_();            // c^2 + d^2 in both lanes
  return conjugate.elwise_div(magnitude_sq);
}
|
| 387 |
+
|
| 388 |
+
Vectorized<ComplexDbl> rsqrt() const {
|
| 389 |
+
return sqrt().reciprocal();
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
static Vectorized<ComplexDbl> horizontal_add(
|
| 393 |
+
Vectorized<ComplexDbl>& first,
|
| 394 |
+
Vectorized<ComplexDbl>& second) {
|
| 395 |
+
// Operates on individual floats, see _mm_hadd_ps
|
| 396 |
+
// {f0+f1, s0+s1, f2+f3, s2+s3, ...}
|
| 397 |
+
// i.e. it sums the re and im of each value and interleaves first and second:
|
| 398 |
+
// {f_re0 + f_im0, s_re0 + s_im0, f_re1 + f_im1, s_re1 + s_im1, ...}
|
| 399 |
+
return el_mergee(first, second) + el_mergeo(first, second);
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
static Vectorized<ComplexDbl> horizontal_sub(
|
| 403 |
+
Vectorized<ComplexDbl>& first,
|
| 404 |
+
Vectorized<ComplexDbl>& second) {
|
| 405 |
+
// we will simulate it differently with 6 instructions total
|
| 406 |
+
// lets permute second so that we can add it getting horizontal sums
|
| 407 |
+
auto first_perm = first.el_swapped(); // 2perm
|
| 408 |
+
auto second_perm = second.el_swapped(); // 2perm
|
| 409 |
+
// summ
|
| 410 |
+
auto first_ret = first - first_perm; // 2sub
|
| 411 |
+
auto second_ret = second - second_perm; // 2 sub
|
| 412 |
+
// now lets choose evens
|
| 413 |
+
return el_mergee(first_ret, second_ret); // 2 mergee's
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
Vectorized<ComplexDbl> inline operator*(const Vectorized<ComplexDbl>& b) const {
  // (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
#if 1
  // VSX-friendly formulation: splat c and d, then one multiply + one fma.
  auto d_splat = b.el_mergeo();          // d, d
  auto c_splat = b.el_mergee();          // c, c
  d_splat = d_splat ^ vd_rsign_mask;     // -d, d
  auto partial = elwise_mult(c_splat);   // ac, bc
  auto ba = el_swapped();                // b, a
  partial = ba.el_madd(d_splat, partial);  // ac - bd, bc + ad
#else
  // x86-style horizontal formulation kept for reference.
  auto ac_bd = elwise_mult(b);
  auto d_c = b.el_swapped();
  d_c = d_c ^ vd_isign_mask;
  auto ad_bc = elwise_mult(d_c);
  auto partial = horizontal_sub(ac_bd, ad_bc);
#endif
  return partial;
}
|
| 435 |
+
|
| 436 |
+
Vectorized<ComplexDbl> inline operator/(const Vectorized<ComplexDbl>& b) const {
  // (a + bi) / (c + di) = [(ac + bd) + (bc - ad)i] / (c^2 + d^2)
  // Scaled (Smith-style) to avoid overflow/underflow in c^2 + d^2:
  // divide everything by sc = max(|c|, |d|) first.
  auto abs_cd = Vectorized{
      vec_andc(b._vec0, vd_sign_mask),
      vec_andc(b._vec1, vd_sign_mask)};       // |c|, |d|
  auto abs_dc = abs_cd.el_swapped();          // |d|, |c|
  auto sc = abs_cd.elwise_max(abs_dc);        // sc = max(|c|, |d|)
  auto num = elwise_div(sc);                  // a/sc, b/sc
  auto den = b.elwise_div(sc);                // c/sc, d/sc
  auto ac_bd = num.elwise_mult(den);          // ac/sc^2, bd/sc^2
  auto dc = den.el_swapped();                 // d/sc, c/sc
  dc = dc ^ vd_rsign_mask;                    // -d/sc, c/sc
  auto ad_bc = num.elwise_mult(dc);           // -ad/sc^2, bc/sc^2
  auto quotient = horizontal_add(ac_bd, ad_bc);  // (ac+bd)/sc^2, (bc-ad)/sc^2
  auto denom = den.abs_2_();                  // (c^2+d^2)/sc^2 in both lanes
  quotient = quotient.elwise_div(denom);
  return quotient;
}
|
| 456 |
+
|
| 457 |
+
Vectorized<ComplexDbl> exp() const {
|
| 458 |
+
return map(std::exp);
|
| 459 |
+
}
|
| 460 |
+
Vectorized<ComplexDbl> exp2() const {
|
| 461 |
+
return map(exp2_impl);
|
| 462 |
+
}
|
| 463 |
+
Vectorized<ComplexDbl> expm1() const {
|
| 464 |
+
return map(std::expm1);
|
| 465 |
+
}
|
| 466 |
+
|
| 467 |
+
Vectorized<ComplexDbl> pow(const Vectorized<ComplexDbl>& exp) const {
  // Scalar fallback: compute std::pow lane-by-lane through memory.
  __at_align__ ComplexDbl base_buf[size()];
  __at_align__ ComplexDbl exp_buf[size()];
  store(base_buf);
  exp.store(exp_buf);
  for (const auto lane : c10::irange(size())) {
    base_buf[lane] = std::pow(base_buf[lane], exp_buf[lane]);
  }
  return loadu(base_buf);
}
|
| 477 |
+
|
| 478 |
+
Vectorized<ComplexDbl> sgn() const {
|
| 479 |
+
return map(at::native::sgn_impl);
|
| 480 |
+
}
|
| 481 |
+
|
| 482 |
+
Vectorized<ComplexDbl> operator<(const Vectorized<ComplexDbl>& other) const {
|
| 483 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
| 484 |
+
}
|
| 485 |
+
Vectorized<ComplexDbl> operator<=(const Vectorized<ComplexDbl>& other) const {
|
| 486 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
| 487 |
+
}
|
| 488 |
+
Vectorized<ComplexDbl> operator>(const Vectorized<ComplexDbl>& other) const {
|
| 489 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
| 490 |
+
}
|
| 491 |
+
Vectorized<ComplexDbl> operator>=(const Vectorized<ComplexDbl>& other) const {
|
| 492 |
+
TORCH_CHECK(false, "not supported for complex numbers");
|
| 493 |
+
}
|
| 494 |
+
|
| 495 |
+
Vectorized<ComplexDbl> eq(const Vectorized<ComplexDbl>& other) const {
  // Component-wise comparison; a complex value is equal only when both
  // its real and imaginary parts match. Result lanes are 1.0 or 0.0.
  auto per_component = (*this == other);
  return (per_component.real() & per_component.imag()) & vd_one;
}
|
| 500 |
+
Vectorized<ComplexDbl> ne(const Vectorized<ComplexDbl>& other) const {
  // Component-wise comparison; a complex value differs when either the
  // real or the imaginary part differs. Result lanes are 1.0 or 0.0.
  auto per_component = (*this != other);
  return (per_component.real() | per_component.imag()) & vd_one;
}
|
| 505 |
+
|
| 506 |
+
DEFINE_MEMBER_OP(operator==, ComplexDbl, vec_cmpeq)
|
| 507 |
+
DEFINE_MEMBER_OP(operator!=, ComplexDbl, vec_cmpne)
|
| 508 |
+
|
| 509 |
+
DEFINE_MEMBER_OP(operator+, ComplexDbl, vec_add)
|
| 510 |
+
DEFINE_MEMBER_OP(operator-, ComplexDbl, vec_sub)
|
| 511 |
+
DEFINE_MEMBER_OP(operator&, ComplexDbl, vec_and)
|
| 512 |
+
DEFINE_MEMBER_OP(operator|, ComplexDbl, vec_or)
|
| 513 |
+
DEFINE_MEMBER_OP(operator^, ComplexDbl, vec_xor)
|
| 514 |
+
// elementwise helpers
|
| 515 |
+
DEFINE_MEMBER_OP(elwise_mult, ComplexDbl, vec_mul)
|
| 516 |
+
DEFINE_MEMBER_OP(elwise_div, ComplexDbl, vec_div)
|
| 517 |
+
DEFINE_MEMBER_OP(elwise_gt, ComplexDbl, vec_cmpgt)
|
| 518 |
+
DEFINE_MEMBER_OP(elwise_ge, ComplexDbl, vec_cmpge)
|
| 519 |
+
DEFINE_MEMBER_OP(elwise_lt, ComplexDbl, vec_cmplt)
|
| 520 |
+
DEFINE_MEMBER_OP(elwise_le, ComplexDbl, vec_cmple)
|
| 521 |
+
DEFINE_MEMBER_OP(elwise_max, ComplexDbl, vec_max)
|
| 522 |
+
};
|
| 523 |
+
|
| 524 |
+
template <>
|
| 525 |
+
Vectorized<ComplexDbl> inline maximum(
|
| 526 |
+
const Vectorized<ComplexDbl>& a,
|
| 527 |
+
const Vectorized<ComplexDbl>& b) {
|
| 528 |
+
auto abs_a = a.abs_2_();
|
| 529 |
+
auto abs_b = b.abs_2_();
|
| 530 |
+
// auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
|
| 531 |
+
// auto max = _mm256_blendv_ps(a, b, mask);
|
| 532 |
+
auto mask = abs_a.elwise_lt(abs_b);
|
| 533 |
+
auto max = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask);
|
| 534 |
+
|
| 535 |
+
return max;
|
| 536 |
+
// Exploit the fact that all-ones is a NaN.
|
| 537 |
+
// auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
|
| 538 |
+
// return _mm256_or_ps(max, isnan);
|
| 539 |
+
}
|
| 540 |
+
|
| 541 |
+
template <>
|
| 542 |
+
Vectorized<ComplexDbl> inline minimum(
|
| 543 |
+
const Vectorized<ComplexDbl>& a,
|
| 544 |
+
const Vectorized<ComplexDbl>& b) {
|
| 545 |
+
auto abs_a = a.abs_2_();
|
| 546 |
+
auto abs_b = b.abs_2_();
|
| 547 |
+
// auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
|
| 548 |
+
// auto min = _mm256_blendv_ps(a, b, mask);
|
| 549 |
+
auto mask = abs_a.elwise_gt(abs_b);
|
| 550 |
+
auto min = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask);
|
| 551 |
+
return min;
|
| 552 |
+
// Exploit the fact that all-ones is a NaN.
|
| 553 |
+
// auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
|
| 554 |
+
// return _mm256_or_ps(min, isnan);
|
| 555 |
+
}
|
| 556 |
+
|
| 557 |
+
template <>
|
| 558 |
+
Vectorized<ComplexDbl> C10_ALWAYS_INLINE operator+(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 559 |
+
return Vectorized<ComplexDbl>{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())};
|
| 560 |
+
}
|
| 561 |
+
|
| 562 |
+
template <>
|
| 563 |
+
Vectorized<ComplexDbl> C10_ALWAYS_INLINE operator-(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 564 |
+
return Vectorized<ComplexDbl>{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())};
|
| 565 |
+
}
|
| 566 |
+
|
| 567 |
+
template <>
|
| 568 |
+
Vectorized<ComplexDbl> C10_ALWAYS_INLINE operator&(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 569 |
+
return Vectorized<ComplexDbl>{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())};
|
| 570 |
+
}
|
| 571 |
+
|
| 572 |
+
template <>
|
| 573 |
+
Vectorized<ComplexDbl> C10_ALWAYS_INLINE operator|(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 574 |
+
return Vectorized<ComplexDbl>{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())};
|
| 575 |
+
}
|
| 576 |
+
|
| 577 |
+
template <>
|
| 578 |
+
Vectorized<ComplexDbl> C10_ALWAYS_INLINE operator^(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
|
| 579 |
+
return Vectorized<ComplexDbl>{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())};
|
| 580 |
+
}
|
| 581 |
+
|
| 582 |
+
} // namespace
|
| 583 |
+
} // namespace vec
|
| 584 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
| 6 |
+
namespace at {
|
| 7 |
+
namespace vec {
|
| 8 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 9 |
+
inline namespace CPU_CAPABILITY {
|
| 10 |
+
|
| 11 |
+
template <>
|
| 12 |
+
class Vectorized<int64_t> {
|
| 13 |
+
private:
|
| 14 |
+
union {
|
| 15 |
+
struct {
|
| 16 |
+
vint64 _vec0;
|
| 17 |
+
vint64 _vec1;
|
| 18 |
+
};
|
| 19 |
+
struct {
|
| 20 |
+
vbool64 _vecb0;
|
| 21 |
+
vbool64 _vecb1;
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
} __attribute__((__may_alias__));
|
| 25 |
+
|
| 26 |
+
public:
|
| 27 |
+
using value_type = int64_t;
|
| 28 |
+
using vec_internal_type = vint64;
|
| 29 |
+
using vec_internal_mask_type = vbool64;
|
| 30 |
+
using size_type = int;
|
| 31 |
+
using ElementType = signed long long;
|
| 32 |
+
static constexpr size_type size() {
|
| 33 |
+
return 4;
|
| 34 |
+
}
|
| 35 |
+
Vectorized() {}
|
| 36 |
+
C10_ALWAYS_INLINE Vectorized(vint64 v) : _vec0{v}, _vec1{v} {}
|
| 37 |
+
C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
|
| 38 |
+
C10_ALWAYS_INLINE Vectorized(vint64 v1, vint64 v2) : _vec0{v1}, _vec1{v2} {}
|
| 39 |
+
C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
|
| 40 |
+
C10_ALWAYS_INLINE Vectorized(int64_t scalar)
|
| 41 |
+
: _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
|
| 42 |
+
C10_ALWAYS_INLINE Vectorized(
|
| 43 |
+
int64_t scalar1,
|
| 44 |
+
int64_t scalar2,
|
| 45 |
+
int64_t scalar3,
|
| 46 |
+
int64_t scalar4)
|
| 47 |
+
: _vec0{vint64{scalar1, scalar2}}, _vec1{vint64{scalar3, scalar4}} {}
|
| 48 |
+
|
| 49 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
|
| 50 |
+
return _vec0;
|
| 51 |
+
}
|
| 52 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
|
| 53 |
+
return _vec1;
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
template <uint64_t mask>
|
| 57 |
+
static std::enable_if_t<mask == 0, Vectorized<int64_t>> C10_ALWAYS_INLINE
|
| 58 |
+
blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 59 |
+
return a;
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
template <uint64_t mask>
|
| 63 |
+
static std::enable_if_t<mask == 3, Vectorized<int64_t>> C10_ALWAYS_INLINE
|
| 64 |
+
blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 65 |
+
return {b._vec0, a._vec1};
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
template <uint64_t mask>
|
| 69 |
+
static std::enable_if_t<(mask & 15) == 15, Vectorized<int64_t>> C10_ALWAYS_INLINE
|
| 70 |
+
blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 71 |
+
return b;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
template <uint64_t mask>
|
| 75 |
+
static std::enable_if_t<(mask > 0 && mask < 3), Vectorized<int64_t>> C10_ALWAYS_INLINE
|
| 76 |
+
blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 77 |
+
constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
|
| 78 |
+
constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
|
| 79 |
+
const vbool64 mask_1st = (vbool64){g0, g1};
|
| 80 |
+
return {(vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), a._vec1};
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
template <uint64_t mask>
|
| 84 |
+
static std::enable_if_t<(mask > 3) && (mask & 3) == 0, Vectorized<int64_t>>
|
| 85 |
+
C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 86 |
+
constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
|
| 87 |
+
constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;
|
| 88 |
+
|
| 89 |
+
const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
|
| 90 |
+
return {a._vec0, (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
template <uint64_t mask>
|
| 94 |
+
static std::enable_if_t<
|
| 95 |
+
(mask > 3) && (mask & 3) != 0 && (mask & 15) != 15,
|
| 96 |
+
Vectorized<int64_t>>
|
| 97 |
+
C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 98 |
+
constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
|
| 99 |
+
constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
|
| 100 |
+
constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
|
| 101 |
+
constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;
|
| 102 |
+
|
| 103 |
+
const vbool64 mask_1st = (vbool64){g0, g1};
|
| 104 |
+
const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
|
| 105 |
+
return {
|
| 106 |
+
(vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st),
|
| 107 |
+
(vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
static Vectorized<int64_t> C10_ALWAYS_INLINE blendv(
|
| 111 |
+
const Vectorized<int64_t>& a,
|
| 112 |
+
const Vectorized<int64_t>& b,
|
| 113 |
+
const Vectorized<int64_t>& mask) {
|
| 114 |
+
// the mask used here returned by comparision of vec256
|
| 115 |
+
|
| 116 |
+
return {
|
| 117 |
+
vec_sel(a._vec0, b._vec0, mask._vecb0),
|
| 118 |
+
vec_sel(a._vec1, b._vec1, mask._vecb1)};
|
| 119 |
+
}
|
| 120 |
+
template <typename step_t>
|
| 121 |
+
static Vectorized<int64_t> arange(int64_t base = 0., step_t step = static_cast<step_t>(1)) {
|
| 122 |
+
return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
static Vectorized<int64_t> C10_ALWAYS_INLINE
|
| 126 |
+
set(const Vectorized<int64_t>& a,
|
| 127 |
+
const Vectorized<int64_t>& b,
|
| 128 |
+
size_t count = size()) {
|
| 129 |
+
switch (count) {
|
| 130 |
+
case 0:
|
| 131 |
+
return a;
|
| 132 |
+
case 1:
|
| 133 |
+
return blend<1>(a, b);
|
| 134 |
+
case 2:
|
| 135 |
+
return blend<3>(a, b);
|
| 136 |
+
case 3:
|
| 137 |
+
return blend<7>(a, b);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
return b;
|
| 141 |
+
}
|
| 142 |
+
static Vectorized<value_type> C10_ALWAYS_INLINE
|
| 143 |
+
loadu(const void* ptr, int count = size()) {
|
| 144 |
+
if (count == size()) {
|
| 145 |
+
static_assert(sizeof(double) == sizeof(value_type));
|
| 146 |
+
const double* dptr = reinterpret_cast<const double*>(ptr);
|
| 147 |
+
return {// treat it as double load
|
| 148 |
+
(vint64)vec_vsx_ld(offset0, dptr),
|
| 149 |
+
(vint64)vec_vsx_ld(offset16, dptr)};
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
__at_align__ double tmp_values[size()] = {};
|
| 153 |
+
std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
|
| 154 |
+
|
| 155 |
+
return {
|
| 156 |
+
(vint64)vec_vsx_ld(offset0, tmp_values),
|
| 157 |
+
(vint64)vec_vsx_ld(offset16, tmp_values)};
|
| 158 |
+
}
|
| 159 |
+
void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
|
| 160 |
+
if (count == size()) {
|
| 161 |
+
double* dptr = reinterpret_cast<double*>(ptr);
|
| 162 |
+
vec_vsx_st((vfloat64)_vec0, offset0, dptr);
|
| 163 |
+
vec_vsx_st((vfloat64)_vec1, offset16, dptr);
|
| 164 |
+
} else if (count > 0) {
|
| 165 |
+
__at_align__ double tmp_values[size()];
|
| 166 |
+
vec_vsx_st((vfloat64)_vec0, offset0, tmp_values);
|
| 167 |
+
vec_vsx_st((vfloat64)_vec1, offset16, tmp_values);
|
| 168 |
+
std::memcpy(
|
| 169 |
+
ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
|
| 170 |
+
}
|
| 171 |
+
}
|
| 172 |
+
const int64_t& operator[](int idx) const = delete;
|
| 173 |
+
int64_t& operator[](int idx) = delete;
|
| 174 |
+
|
| 175 |
+
Vectorized<int64_t> angle() const {
|
| 176 |
+
return blendv(
|
| 177 |
+
Vectorized<int64_t>(0), Vectorized<int64_t>(c10::pi<int64_t>), *this < Vectorized<int64_t>(0));
|
| 178 |
+
}
|
| 179 |
+
Vectorized<int64_t> real() const {
|
| 180 |
+
return *this;
|
| 181 |
+
}
|
| 182 |
+
Vectorized<int64_t> imag() const {
|
| 183 |
+
return Vectorized<int64_t>{0};
|
| 184 |
+
}
|
| 185 |
+
Vectorized<int64_t> conj() const {
|
| 186 |
+
return *this;
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE abs() const {
|
| 190 |
+
return {vec_abs(_vec0), vec_abs(_vec1)};
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE neg() const {
|
| 194 |
+
return {vec_neg(_vec0), vec_neg(_vec1)};
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
DEFINE_MEMBER_UNARY_OP(operator~, int64_t, vec_not)
|
| 198 |
+
DEFINE_MEMBER_OP(operator==, int64_t, vec_cmpeq)
|
| 199 |
+
DEFINE_MEMBER_OP(operator!=, int64_t, vec_cmpne)
|
| 200 |
+
DEFINE_MEMBER_OP(operator<, int64_t, vec_cmplt)
|
| 201 |
+
DEFINE_MEMBER_OP(operator<=, int64_t, vec_cmple)
|
| 202 |
+
DEFINE_MEMBER_OP(operator>, int64_t, vec_cmpgt)
|
| 203 |
+
DEFINE_MEMBER_OP(operator>=, int64_t, vec_cmpge)
|
| 204 |
+
DEFINE_MEMBER_OP_AND_ONE(eq, int64_t, vec_cmpeq)
|
| 205 |
+
DEFINE_MEMBER_OP_AND_ONE(ne, int64_t, vec_cmpne)
|
| 206 |
+
DEFINE_MEMBER_OP_AND_ONE(lt, int64_t, vec_cmplt)
|
| 207 |
+
DEFINE_MEMBER_OP_AND_ONE(le, int64_t, vec_cmple)
|
| 208 |
+
DEFINE_MEMBER_OP_AND_ONE(gt, int64_t, vec_cmpgt)
|
| 209 |
+
DEFINE_MEMBER_OP_AND_ONE(ge, int64_t, vec_cmpge)
|
| 210 |
+
DEFINE_MEMBER_OP(operator+, int64_t, vec_add)
|
| 211 |
+
DEFINE_MEMBER_OP(operator-, int64_t, vec_sub)
|
| 212 |
+
DEFINE_MEMBER_OP(operator*, int64_t, vec_mul)
|
| 213 |
+
DEFINE_MEMBER_OP(operator/, int64_t, vec_div)
|
| 214 |
+
DEFINE_MEMBER_OP(maximum, int64_t, vec_max)
|
| 215 |
+
DEFINE_MEMBER_OP(minimum, int64_t, vec_min)
|
| 216 |
+
DEFINE_MEMBER_OP(operator&, int64_t, vec_and)
|
| 217 |
+
DEFINE_MEMBER_OP(operator|, int64_t, vec_or)
|
| 218 |
+
DEFINE_MEMBER_OP(operator^, int64_t, vec_xor)
|
| 219 |
+
};
|
| 220 |
+
|
| 221 |
+
template <>
|
| 222 |
+
Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 223 |
+
vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
|
| 224 |
+
vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1()) ;
|
| 225 |
+
return Vectorized<int64_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
template <>
|
| 229 |
+
Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 230 |
+
vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
|
| 231 |
+
vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1()) ;
|
| 232 |
+
return Vectorized<int64_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
template <>
|
| 236 |
+
Vectorized<int64_t> inline maximum(
|
| 237 |
+
const Vectorized<int64_t>& a,
|
| 238 |
+
const Vectorized<int64_t>& b) {
|
| 239 |
+
return a.maximum(b);
|
| 240 |
+
}
|
| 241 |
+
|
| 242 |
+
template <>
|
| 243 |
+
Vectorized<int64_t> inline minimum(
|
| 244 |
+
const Vectorized<int64_t>& a,
|
| 245 |
+
const Vectorized<int64_t>& b) {
|
| 246 |
+
return a.minimum(b);
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
template <>
|
| 250 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 251 |
+
return Vectorized<int64_t>{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())};
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
template <>
|
| 255 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 256 |
+
return Vectorized<int64_t>{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())};
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
template <>
|
| 260 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 261 |
+
return Vectorized<int64_t>{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())};
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
template <>
|
| 265 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 266 |
+
return Vectorized<int64_t>{vec_div(a.vec0(), b.vec0()), vec_div(a.vec1(), b.vec1())};
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
template <>
|
| 270 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE operator&(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 271 |
+
return Vectorized<int64_t>{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())};
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
template <>
|
| 275 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE operator|(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 276 |
+
return Vectorized<int64_t>{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())};
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
template <>
|
| 280 |
+
Vectorized<int64_t> C10_ALWAYS_INLINE operator^(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
|
| 281 |
+
return Vectorized<int64_t>{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())};
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
} // namespace
|
| 285 |
+
} // namespace vec
|
| 286 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 4 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 5 |
+
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
|
| 6 |
+
#include <c10/util/qint32.h>
|
| 7 |
+
#include <array>
|
| 8 |
+
|
| 9 |
+
// This file defines Vectorized<> for the quantized types.
|
| 10 |
+
//
|
| 11 |
+
//
|
| 12 |
+
// Currently, we simply use these classes as efficient converters between
|
| 13 |
+
// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
|
| 14 |
+
// where doing the arithmetic in full-precision is acceptable (e.g.
|
| 15 |
+
// elementwise operators).
|
| 16 |
+
//
|
| 17 |
+
//
|
| 18 |
+
// Conversions are as follows:
|
| 19 |
+
// Vectorized<qint32> -> 1x Vectorized<float>
|
| 20 |
+
//
|
| 21 |
+
// The size of the returned float vector is specified by the special
|
| 22 |
+
// constexpr function float_num_vecs. The type of the value returned
|
| 23 |
+
// from dequantize (and expected as an argument to quantize) is
|
| 24 |
+
// specified by float_vec_return_type.
|
| 25 |
+
//
|
| 26 |
+
// When writing kernels with these vectors, it is expected that floating-
|
| 27 |
+
// point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
|
| 28 |
+
// iterations.
|
| 29 |
+
|
| 30 |
+
namespace at {
|
| 31 |
+
namespace vec {
|
| 32 |
+
inline namespace CPU_CAPABILITY {
|
| 33 |
+
|
| 34 |
+
template <>
|
| 35 |
+
struct Vectorized<c10::qint32> {
|
| 36 |
+
private:
|
| 37 |
+
union {
|
| 38 |
+
struct {
|
| 39 |
+
vint32 _vec0;
|
| 40 |
+
vint32 _vec1;
|
| 41 |
+
};
|
| 42 |
+
struct {
|
| 43 |
+
vbool32 _vecb0;
|
| 44 |
+
vbool32 _vecb1;
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
} __attribute__((__may_alias__));
|
| 48 |
+
|
| 49 |
+
public:
|
| 50 |
+
Vectorized() {}
|
| 51 |
+
|
| 52 |
+
using size_type = int;
|
| 53 |
+
static constexpr size_type size() {
|
| 54 |
+
return 8;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
static constexpr size_t float_num_vecs() {
|
| 58 |
+
return 1;
|
| 59 |
+
}
|
| 60 |
+
static constexpr int int_num_vecs() {
|
| 61 |
+
return 1;
|
| 62 |
+
}
|
| 63 |
+
using float_vec_return_type = std::array<Vectorized<float>, 1>;
|
| 64 |
+
using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
|
| 65 |
+
using value_type = c10::qint32::underlying;
|
| 66 |
+
using vec_internal_type = vint32;
|
| 67 |
+
using vec_internal_mask_type = vbool32;
|
| 68 |
+
C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
|
| 69 |
+
C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
|
| 70 |
+
C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
|
| 71 |
+
C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
|
| 72 |
+
|
| 73 |
+
Vectorized(const c10::qint32& val)
|
| 74 |
+
: _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {}
|
| 75 |
+
|
| 76 |
+
static Vectorized<c10::qint32> C10_ALWAYS_INLINE
|
| 77 |
+
loadu(const void* ptr, int count = size()) {
|
| 78 |
+
if (count == size()) {
|
| 79 |
+
return {
|
| 80 |
+
vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
|
| 81 |
+
vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
__at_align__ value_type tmp_values[size()] = {};
|
| 85 |
+
std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
|
| 86 |
+
|
| 87 |
+
return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
|
| 88 |
+
}
|
| 89 |
+
void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
|
| 90 |
+
if (count == size()) {
|
| 91 |
+
vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
|
| 92 |
+
vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
|
| 93 |
+
} else if (count > 0) {
|
| 94 |
+
__at_align__ value_type tmp_values[size()];
|
| 95 |
+
vec_vsx_st(_vec0, offset0, tmp_values);
|
| 96 |
+
vec_vsx_st(_vec1, offset16, tmp_values);
|
| 97 |
+
std::memcpy(
|
| 98 |
+
ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
|
| 99 |
+
}
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
|
| 103 |
+
return _vec0;
|
| 104 |
+
}
|
| 105 |
+
C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
|
| 106 |
+
return _vec1;
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
float_vec_return_type dequantize(
|
| 110 |
+
Vectorized<float> scale,
|
| 111 |
+
Vectorized<float> zero_point,
|
| 112 |
+
Vectorized<float> scale_zp_premul) const {
|
| 113 |
+
vfloat32 float_vals0 = vec_float(_vec0);
|
| 114 |
+
vfloat32 float_vals1 = vec_float(_vec1);
|
| 115 |
+
vfloat32 scale_vec0 = scale.vec0();
|
| 116 |
+
vfloat32 scale_vec1 = scale.vec1();
|
| 117 |
+
vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
|
| 118 |
+
vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
|
| 119 |
+
return {Vectorized<float>{
|
| 120 |
+
vec_madd(scale_vec0, float_vals0, scale_zp_premul0),
|
| 121 |
+
vec_madd(scale_vec1, float_vals1, scale_zp_premul1)}};
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
float_vec_return_type dequantize(
|
| 125 |
+
Vectorized<float> scale,
|
| 126 |
+
Vectorized<float> zero_point) const {
|
| 127 |
+
vfloat32 float_vals0 = vec_float(_vec0);
|
| 128 |
+
vfloat32 float_vals1 = vec_float(_vec1);
|
| 129 |
+
vfloat32 scale_vec0 = scale.vec0();
|
| 130 |
+
vfloat32 scale_vec1 = scale.vec1();
|
| 131 |
+
vfloat32 zero_point0 = zero_point.vec0();
|
| 132 |
+
vfloat32 zero_point1 = zero_point.vec1();
|
| 133 |
+
return {Vectorized<float>{
|
| 134 |
+
(float_vals0 - zero_point0) * scale_vec0,
|
| 135 |
+
(float_vals1 - zero_point1) * scale_vec1}};
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
static Vectorized<c10::qint32> quantize(
|
| 139 |
+
const float_vec_return_type& rhs,
|
| 140 |
+
float scale,
|
| 141 |
+
int32_t zero_point,
|
| 142 |
+
float inverse_scale) {
|
| 143 |
+
Vectorized<c10::qint32> retval;
|
| 144 |
+
|
| 145 |
+
const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
|
| 146 |
+
const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
|
| 147 |
+
vfloat32 inverse_scale_v = vec_splats(inverse_scale);
|
| 148 |
+
vfloat32 vec_zero_point = vec_splats((float)(zero_point));
|
| 149 |
+
Vectorized<float> vf0 = rhs[0];
|
| 150 |
+
|
| 151 |
+
vfloat32 vecf0 = vf0.vec0();
|
| 152 |
+
vfloat32 vecf1 = vf0.vec1();
|
| 153 |
+
vecf0 = vec_mul(vecf0, inverse_scale_v);
|
| 154 |
+
vecf1 = vec_mul(vecf1, inverse_scale_v);
|
| 155 |
+
vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
|
| 156 |
+
vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
|
| 157 |
+
vint32 veci0 = vec_signed(vecf0);
|
| 158 |
+
vint32 veci1 = vec_signed(vecf1);
|
| 159 |
+
|
| 160 |
+
veci0 = vec_max(veci0, vmin);
|
| 161 |
+
veci1 = vec_max(veci1, vmin);
|
| 162 |
+
veci0 = vec_min(veci0, vmax);
|
| 163 |
+
veci1 = vec_min(veci1, vmax);
|
| 164 |
+
|
| 165 |
+
return {veci0, veci1};
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
|
| 169 |
+
return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
Vectorized<c10::qint32> relu6(
|
| 173 |
+
Vectorized<c10::qint32> zero_point,
|
| 174 |
+
Vectorized<c10::qint32> q_six) const {
|
| 175 |
+
vint32 max0 = vec_max(_vec0, zero_point._vec0);
|
| 176 |
+
vint32 max1 = vec_max(_vec1, zero_point._vec1);
|
| 177 |
+
return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
|
| 181 |
+
return {*this - b};
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
static Vectorized<c10::qint32> requantize_from_int(
|
| 185 |
+
const int_vec_return_type& inp,
|
| 186 |
+
float multiplier,
|
| 187 |
+
int32_t zero_point) {
|
| 188 |
+
const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
|
| 189 |
+
const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
|
| 190 |
+
vfloat32 vec_mult = vec_splats(multiplier);
|
| 191 |
+
vint32 vec_zero_point = vec_splats(zero_point);
|
| 192 |
+
Vectorized<c10::qint32> vi = inp[0];
|
| 193 |
+
vfloat32 vecf0 = vec_float(vi.vec0());
|
| 194 |
+
vfloat32 vecf1 = vec_float(vi.vec1());
|
| 195 |
+
|
| 196 |
+
vecf0 = vec_mul(vecf0, vec_mult);
|
| 197 |
+
vecf1 = vec_mul(vecf1, vec_mult);
|
| 198 |
+
|
| 199 |
+
vecf0 = vec_rint(vecf0);
|
| 200 |
+
vecf1 = vec_rint(vecf1);
|
| 201 |
+
|
| 202 |
+
vint32 veci0 = vec_add(vec_signed(vecf0),vec_zero_point);
|
| 203 |
+
vint32 veci1 = vec_add(vec_signed(vecf1),vec_zero_point);
|
| 204 |
+
|
| 205 |
+
veci0 = vec_max(veci0, vmin);
|
| 206 |
+
veci1 = vec_max(veci1, vmin);
|
| 207 |
+
veci0 = vec_min(veci0, vmax);
|
| 208 |
+
veci1 = vec_min(veci1, vmax);
|
| 209 |
+
|
| 210 |
+
return {veci0, veci1};
|
| 211 |
+
}
|
| 212 |
+
|
| 213 |
+
DEFINE_MEMBER_OP(operator==, c10::qint32, vec_cmpeq)
|
| 214 |
+
DEFINE_MEMBER_OP(operator!=, c10::qint32, vec_cmpne)
|
| 215 |
+
DEFINE_MEMBER_OP(operator<, c10::qint32, vec_cmplt)
|
| 216 |
+
DEFINE_MEMBER_OP(operator<=, c10::qint32, vec_cmple)
|
| 217 |
+
DEFINE_MEMBER_OP(operator>, c10::qint32, vec_cmpgt)
|
| 218 |
+
DEFINE_MEMBER_OP(operator>=, c10::qint32, vec_cmpge)
|
| 219 |
+
DEFINE_MEMBER_OP(operator+, c10::qint32, vec_add)
|
| 220 |
+
DEFINE_MEMBER_OP(operator-, c10::qint32, vec_sub)
|
| 221 |
+
DEFINE_MEMBER_OP(operator*, c10::qint32, vec_mul)
|
| 222 |
+
DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint32, /)
|
| 223 |
+
DEFINE_MEMBER_OP(maximum, c10::qint32, vec_max)
|
| 224 |
+
DEFINE_MEMBER_OP(minimum, c10::qint32, vec_min)
|
| 225 |
+
DEFINE_MEMBER_OP(operator&, c10::qint32, vec_and)
|
| 226 |
+
DEFINE_MEMBER_OP(operator|, c10::qint32, vec_or)
|
| 227 |
+
DEFINE_MEMBER_OP(operator^, c10::qint32, vec_xor)
|
| 228 |
+
};
|
| 229 |
+
|
| 230 |
+
template <>
|
| 231 |
+
Vectorized<c10::qint32> inline maximum(
|
| 232 |
+
const Vectorized<c10::qint32>& a,
|
| 233 |
+
const Vectorized<c10::qint32>& b) {
|
| 234 |
+
return a.maximum(b);
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
template <>
|
| 238 |
+
Vectorized<c10::qint32> inline minimum(
|
| 239 |
+
const Vectorized<c10::qint32>& a,
|
| 240 |
+
const Vectorized<c10::qint32>& b) {
|
| 241 |
+
return a.minimum(b);
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
template <>
|
| 245 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator+(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 246 |
+
return Vectorized<c10::qint32>{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())};
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
template <>
|
| 250 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator-(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 251 |
+
return Vectorized<c10::qint32>{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())};
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
template <>
|
| 255 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator*(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 256 |
+
return Vectorized<c10::qint32>{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())};
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
template <>
|
| 260 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator/(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 261 |
+
return Vectorized<c10::qint32>{a.vec0()/b.vec0(), a.vec1()/b.vec1()};
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
template <>
|
| 265 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator&(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 266 |
+
return Vectorized<c10::qint32>{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())};
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
template <>
|
| 270 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator|(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 271 |
+
return Vectorized<c10::qint32>{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())};
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
template <>
|
| 275 |
+
Vectorized<c10::qint32> C10_ALWAYS_INLINE operator^(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
|
| 276 |
+
return Vectorized<c10::qint32>{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())};
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
} // namespace
|
| 280 |
+
} // namespace vec
|
| 281 |
+
} // namespace at
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
|
| 8 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec512/vec512_float.h>
|
| 10 |
+
#include <ATen/cpu/vec/vec512/vec512_bfloat16.h>
|
| 11 |
+
#include <ATen/cpu/vec/vec512/vec512_double.h>
|
| 12 |
+
#include <ATen/cpu/vec/vec512/vec512_int.h>
|
| 13 |
+
#include <ATen/cpu/vec/vec512/vec512_qint.h>
|
| 14 |
+
#include <ATen/cpu/vec/vec512/vec512_complex_float.h>
|
| 15 |
+
#include <ATen/cpu/vec/vec512/vec512_complex_double.h>
|
| 16 |
+
#include <ATen/cpu/vec/vec512/vec512_convert.h>
|
| 17 |
+
#include <ATen/cpu/vec/vec512/vec512_mask.h>
|
| 18 |
+
|
| 19 |
+
#include <algorithm>
|
| 20 |
+
#include <cstddef>
|
| 21 |
+
#include <cstdint>
|
| 22 |
+
#include <cstring>
|
| 23 |
+
#include <ostream>
|
| 24 |
+
|
| 25 |
+
namespace at {
|
| 26 |
+
namespace vec {
|
| 27 |
+
|
| 28 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 29 |
+
inline namespace CPU_CAPABILITY {
|
| 30 |
+
|
| 31 |
+
// Stream insertion for the quantized scalar types: print the raw underlying
// integer value (8-bit types are widened so they print as numbers, not chars).
inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
  return stream << val.val_;
}
inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
  return stream << static_cast<int>(val.val_);
}
inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
  return stream << static_cast<unsigned int>(val.val_);
}
|
| 43 |
+
|
| 44 |
+
template <typename T>
|
| 45 |
+
std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
|
| 46 |
+
T buf[Vectorized<T>::size()];
|
| 47 |
+
vec.store(buf);
|
| 48 |
+
stream << "vec[";
|
| 49 |
+
for (int i = 0; i != Vectorized<T>::size(); i++) {
|
| 50 |
+
if (i != 0) {
|
| 51 |
+
stream << ", ";
|
| 52 |
+
}
|
| 53 |
+
stream << buf[i];
|
| 54 |
+
}
|
| 55 |
+
stream << "]";
|
| 56 |
+
return stream;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 61 |
+
|
| 62 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX512) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 63 |
+
|
| 64 |
+
template<>
|
| 65 |
+
inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
|
| 66 |
+
return _mm512_castpd_ps(src);
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
template<>
|
| 70 |
+
inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
|
| 71 |
+
return _mm512_castps_pd(src);
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
template<>
|
| 75 |
+
inline Vectorized<float> cast<float, int32_t>(const Vectorized<int32_t>& src) {
|
| 76 |
+
return _mm512_castsi512_ps(src);
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
template<>
|
| 80 |
+
inline Vectorized<double> cast<double, int64_t>(const Vectorized<int64_t>& src) {
|
| 81 |
+
return _mm512_castsi512_pd(src);
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 85 |
+
#ifndef _MSC_VER
|
| 86 |
+
// MSVC is not working well on complex function overload.
|
| 87 |
+
template<int64_t scale = 1>
|
| 88 |
+
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
|
| 89 |
+
inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
|
| 90 |
+
return _mm512_i64gather_pd(vindex, base_addr, scale);
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
template<int64_t scale = 1>
|
| 94 |
+
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
|
| 95 |
+
inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
|
| 96 |
+
return _mm512_i32gather_ps(vindex, base_addr, scale);
|
| 97 |
+
}
|
| 98 |
+
#endif
|
| 99 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 100 |
+
#ifndef _MSC_VER
|
| 101 |
+
// MSVC is not working well on complex function overload.
|
| 102 |
+
template<int64_t scale = 1>
|
| 103 |
+
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
|
| 104 |
+
inline mask_gather(const Vectorized<double>& src, const double* base_addr,
|
| 105 |
+
const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
|
| 106 |
+
auto all_ones = _mm512_castsi512_pd(_mm512_set1_epi64(0xFFFFFFFFFFFFFFFF));
|
| 107 |
+
auto mask_ = _mm512_cmp_pd_mask(all_ones, mask.values, _CMP_EQ_OQ);
|
| 108 |
+
return _mm512_mask_i64gather_pd(src, mask_, vindex, base_addr, scale);
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
template<int64_t scale = 1>
|
| 112 |
+
std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
|
| 113 |
+
inline mask_gather(const Vectorized<float>& src, const float* base_addr,
|
| 114 |
+
const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
|
| 115 |
+
auto all_ones = _mm512_castsi512_ps(_mm512_set1_epi32(0xFFFFFFFF));
|
| 116 |
+
auto mask_ = _mm512_cmp_ps_mask(all_ones, mask.values, _CMP_EQ_OQ);
|
| 117 |
+
return _mm512_mask_i32gather_ps(src, mask_, vindex, base_addr, scale);
|
| 118 |
+
}
|
| 119 |
+
#endif
|
| 120 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 121 |
+
|
| 122 |
+
template<>
|
| 123 |
+
Vectorized<int64_t>
|
| 124 |
+
inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
|
| 125 |
+
return _mm512_cvtpd_epi64(src);
|
| 126 |
+
}
|
| 127 |
+
|
| 128 |
+
template<>
|
| 129 |
+
Vectorized<int32_t>
|
| 130 |
+
inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
|
| 131 |
+
return _mm512_cvttps_epi32(src);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
template<>
|
| 135 |
+
Vectorized<double>
|
| 136 |
+
inline convert_to_fp_of_same_size<double>(const Vectorized<int64_t> &src) {
|
| 137 |
+
return _mm512_cvtepi64_pd(src);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
template<>
|
| 141 |
+
Vectorized<float>
|
| 142 |
+
inline convert_to_fp_of_same_size<float>(const Vectorized<int32_t> &src) {
|
| 143 |
+
return _mm512_cvtepi32_ps(src);
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 147 |
+
|
| 148 |
+
template <>
|
| 149 |
+
std::pair<Vectorized<double>, Vectorized<double>>
|
| 150 |
+
inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
|
| 151 |
+
// inputs:
|
| 152 |
+
// a = {a0, a1, a3, a3, a4, a5, a6, a7}
|
| 153 |
+
// b = {b0, b1, b2, b3, b4, b5, b6, b7}
|
| 154 |
+
// group cols crossing lanes:
|
| 155 |
+
// return {a0, b0, a1, b1, a2, b2, a3, b3}
|
| 156 |
+
// {a4, b4, a5, b5, a6, b6, a7, b7}
|
| 157 |
+
__m512i idx1 = _mm512_set_epi64(11, 3, 10, 2, 9, 1, 8, 0);
|
| 158 |
+
__m512i idx2 = _mm512_set_epi64(15, 7, 14, 6, 13, 5, 12, 4);
|
| 159 |
+
return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
|
| 160 |
+
_mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
template <>
|
| 164 |
+
std::pair<Vectorized<float>, Vectorized<float>>
|
| 165 |
+
inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 166 |
+
// inputs:
|
| 167 |
+
// a = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
|
| 168 |
+
// b = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
|
| 169 |
+
//
|
| 170 |
+
// return:
|
| 171 |
+
// {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
|
| 172 |
+
// {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
|
| 173 |
+
__m512i idx1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4,
|
| 174 |
+
19, 3, 18, 2, 17, 1, 16, 0);
|
| 175 |
+
__m512i idx2 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12,
|
| 176 |
+
27, 11, 26, 10, 25, 9, 24, 8);
|
| 177 |
+
return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
|
| 178 |
+
_mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 182 |
+
|
| 183 |
+
template <>
|
| 184 |
+
std::pair<Vectorized<double>, Vectorized<double>>
|
| 185 |
+
inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
|
| 186 |
+
// inputs:
|
| 187 |
+
// a = {a0, b0, a1, b1, a2, b2, a3, b3}
|
| 188 |
+
// b = {a4, b4, a5, b5, a6, b6, a7, b7}
|
| 189 |
+
// output:
|
| 190 |
+
// return {a0, a1, a2, a3, a4, a5, a6, a7}
|
| 191 |
+
// {b0, b1, b2, b3, b4, b5, b6, b7}
|
| 192 |
+
// The members of indices have been written in binary format for better understandability
|
| 193 |
+
__m512i idx1 = _mm512_set_epi64(14, 12, 10, 8, 6, 4, 2, 0);
|
| 194 |
+
__m512i idx2 = _mm512_set_epi64(15, 13, 11, 9, 7, 5, 3, 1);
|
| 195 |
+
|
| 196 |
+
return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
|
| 197 |
+
_mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
template <>
|
| 201 |
+
std::pair<Vectorized<float>, Vectorized<float>>
|
| 202 |
+
inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
|
| 203 |
+
// inputs:
|
| 204 |
+
// a = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
|
| 205 |
+
// b = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
|
| 206 |
+
// output:
|
| 207 |
+
// return {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
|
| 208 |
+
// {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
|
| 209 |
+
__m512i idx1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16,
|
| 210 |
+
14, 12, 10, 8, 6, 4, 2, 0);
|
| 211 |
+
__m512i idx2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17,
|
| 212 |
+
15, 13, 11, 9, 7, 5, 3, 1);
|
| 213 |
+
|
| 214 |
+
return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
|
| 215 |
+
_mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 219 |
+
|
| 220 |
+
template<>
|
| 221 |
+
inline Vectorized<float> flip(const Vectorized<float> & v) {
|
| 222 |
+
const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
|
| 223 |
+
8, 9, 10, 11, 12, 13, 14, 15);
|
| 224 |
+
return _mm512_permutexvar_ps(mask, v);
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
template<>
|
| 228 |
+
inline Vectorized<double> flip(const Vectorized<double> & v) {
|
| 229 |
+
const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
|
| 230 |
+
return _mm512_permutexvar_pd(mask, v);
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
template<>
|
| 234 |
+
inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
|
| 235 |
+
const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
|
| 236 |
+
return _mm512_permutexvar_epi64(mask, v);
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
template<>
|
| 240 |
+
inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
|
| 241 |
+
const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
|
| 242 |
+
8, 9, 10, 11, 12, 13, 14, 15);
|
| 243 |
+
return _mm512_permutexvar_epi32(mask, v);
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
template<>
|
| 247 |
+
inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
|
| 248 |
+
const __m512i mask = _mm512_set_epi16(
|
| 249 |
+
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
|
| 250 |
+
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
|
| 251 |
+
);
|
| 252 |
+
return _mm512_permutexvar_epi16(mask, v);
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
// Byte-wise reversal of a 512-bit register, done in two steps:
//   1. reverse the 16 bytes inside each 128-bit lane (_mm512_shuffle_epi8
//      operates per-lane, so the same byte pattern is repeated 4 times);
//   2. permute the 64-bit chunks across lanes to complete the full reversal.
inline __m512i flip8(const __m512i & v) {
  const __m512i mask1 = _mm512_set_epi8(
      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  );
  const __m512i mask2 = _mm512_set_epi64(1, 0, 3, 2, 5, 4, 7, 6);
  auto reversed_vec = _mm512_shuffle_epi8(v, mask1);
  return _mm512_permutexvar_epi64(mask2, reversed_vec);
}
|
| 266 |
+
|
| 267 |
+
template<>
|
| 268 |
+
inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
|
| 269 |
+
return flip8(v);
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
template<>
|
| 273 |
+
inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
|
| 274 |
+
return flip8(v);
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
#endif // defined(CPU_CAPABILITY_AVX512)
|
| 278 |
+
|
| 279 |
+
}}}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h
ADDED
|
@@ -0,0 +1,1662 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 7 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 11 |
+
#define SLEEF_STATIC_LIBS
|
| 12 |
+
#include <sleef.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
namespace at {
|
| 16 |
+
namespace vec {
|
| 17 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 18 |
+
inline namespace CPU_CAPABILITY {
|
| 19 |
+
|
| 20 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 21 |
+
|
| 22 |
+
#ifndef SLEEF_CONST
|
| 23 |
+
#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER)
|
| 24 |
+
#define SLEEF_CONST const
|
| 25 |
+
#else
|
| 26 |
+
#define SLEEF_CONST
|
| 27 |
+
#endif
|
| 28 |
+
#define SLEEF_CONST_OLD SLEEF_CONST
|
| 29 |
+
#else
|
| 30 |
+
#define SLEEF_CONST_OLD
|
| 31 |
+
#endif
|
| 32 |
+
|
| 33 |
+
// bfloat16 conversion
|
| 34 |
+
static inline void cvtbf16_fp32(const __m256i& a, __m512& o) {
|
| 35 |
+
o = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
static inline void cvtbf16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
|
| 39 |
+
__m256i lo = _mm512_extracti32x8_epi32(a, 0);
|
| 40 |
+
__m256i hi = _mm512_extracti32x8_epi32(a, 1);
|
| 41 |
+
cvtbf16_fp32(lo, o1);
|
| 42 |
+
cvtbf16_fp32(hi, o2);
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
static inline __m256i cvtfp32_bf16(const __m512& src) {
|
| 46 |
+
__m512i value = _mm512_castps_si512(src);
|
| 47 |
+
__m512i nan = _mm512_set1_epi32(0xffff);
|
| 48 |
+
auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q);
|
| 49 |
+
__m512i ones = _mm512_set1_epi32(0x1);
|
| 50 |
+
__m512i vec_bias = _mm512_set1_epi32(0x7fff);
|
| 51 |
+
// uint32_t lsb = (input >> 16) & 1;
|
| 52 |
+
auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones);
|
| 53 |
+
// uint32_t rounding_bias = 0x7fff + lsb;
|
| 54 |
+
t_value = _mm512_add_epi32(t_value, vec_bias);
|
| 55 |
+
// input += rounding_bias;
|
| 56 |
+
t_value = _mm512_add_epi32(t_value, value);
|
| 57 |
+
// input = input >> 16;
|
| 58 |
+
t_value = _mm512_srli_epi32(t_value, 16);
|
| 59 |
+
// Check NaN before converting back to bf16
|
| 60 |
+
t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value);
|
| 61 |
+
return _mm512_cvtusepi32_epi16(t_value);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
static inline __m512i cvtfp32_bf16(const __m512& a, const __m512& b) {
|
| 65 |
+
__m512i lo = _mm512_castps_si512(a);
|
| 66 |
+
__m512i hi = _mm512_castps_si512(b);
|
| 67 |
+
__m512i nan = _mm512_set1_epi32(0xffff);
|
| 68 |
+
auto mask_lo = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
|
| 69 |
+
auto mask_hi = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q);
|
| 70 |
+
__m512i ones = _mm512_set1_epi32(0x1);
|
| 71 |
+
__m512i vec_bias = _mm512_set1_epi32(0x7fff);
|
| 72 |
+
// uint32_t lsb = (input >> 16) & 1;
|
| 73 |
+
auto t_lo = _mm512_and_si512(_mm512_srli_epi32(lo, 16), ones);
|
| 74 |
+
auto t_hi = _mm512_and_si512(_mm512_srli_epi32(hi, 16), ones);
|
| 75 |
+
// uint32_t rounding_bias = 0x7fff + lsb;
|
| 76 |
+
t_lo = _mm512_add_epi32(t_lo, vec_bias);
|
| 77 |
+
t_hi = _mm512_add_epi32(t_hi, vec_bias);
|
| 78 |
+
// input += rounding_bias;
|
| 79 |
+
t_lo = _mm512_add_epi32(t_lo, lo);
|
| 80 |
+
t_hi = _mm512_add_epi32(t_hi, hi);
|
| 81 |
+
// input = input >> 16;
|
| 82 |
+
t_lo = _mm512_srli_epi32(t_lo, 16);
|
| 83 |
+
t_hi = _mm512_srli_epi32(t_hi, 16);
|
| 84 |
+
// Check NaN before converting back to bf16
|
| 85 |
+
t_lo = _mm512_mask_blend_epi32(mask_lo, nan, t_lo);
|
| 86 |
+
t_hi = _mm512_mask_blend_epi32(mask_hi, nan, t_hi);
|
| 87 |
+
|
| 88 |
+
t_lo = _mm512_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
|
| 89 |
+
__m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
|
| 90 |
+
return _mm512_permutexvar_epi64(idx, t_lo);
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
static inline __m512i merge_compare_result(const __m512& a, const __m512& b) {
|
| 94 |
+
__m512i lo = _mm512_castps_si512(a);
|
| 95 |
+
__m512i hi = _mm512_castps_si512(b);
|
| 96 |
+
lo = _mm512_srli_epi32(lo, 16);
|
| 97 |
+
hi = _mm512_srli_epi32(hi, 16);
|
| 98 |
+
auto out = _mm512_packus_epi32(lo, hi);
|
| 99 |
+
__m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
|
| 100 |
+
return _mm512_permutexvar_epi64(idx, out);
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
// float16 conversion
|
| 104 |
+
static inline void cvtfp16_fp32(const __m256i& a, __m512& o) {
|
| 105 |
+
o = _mm512_cvtph_ps(a);
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
static inline void cvtfp16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
|
| 109 |
+
__m256i lo = _mm512_extracti32x8_epi32(a, 0);
|
| 110 |
+
__m256i hi = _mm512_extracti32x8_epi32(a, 1);
|
| 111 |
+
cvtfp16_fp32(lo, o1);
|
| 112 |
+
cvtfp16_fp32(hi, o2);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
static inline __m256i cvtfp32_fp16(const __m512& src) {
|
| 116 |
+
return _mm512_cvtps_ph(
|
| 117 |
+
src, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
static inline __m512i cvtfp32_fp16(const __m512& a, const __m512& b) {
|
| 121 |
+
__m256i lo = _mm512_cvtps_ph(
|
| 122 |
+
a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 123 |
+
__m256i hi = _mm512_cvtps_ph(
|
| 124 |
+
b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
|
| 125 |
+
__m512 t_lo = _mm512_castsi512_ps(_mm512_castsi256_si512(lo));
|
| 126 |
+
__m256 t_hi = _mm256_castsi256_ps(hi);
|
| 127 |
+
return _mm512_castps_si512(_mm512_insertf32x8(t_lo, t_hi, 1));
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
// dtype conversion between float16/bfloat16 and float32
|
| 131 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 132 |
+
inline void cvt_to_fp32(const __m256i& a, __m512& o);
|
| 133 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m512& o) {
|
| 134 |
+
cvtbf16_fp32(a, o);
|
| 135 |
+
}
|
| 136 |
+
template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m512& o) {
|
| 137 |
+
cvtfp16_fp32(a, o);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 141 |
+
inline void cvt_to_fp32(const __m512i& a, __m512& o1, __m512& o2);
|
| 142 |
+
template <> inline void cvt_to_fp32<BFloat16>(const __m512i& a, __m512& o1, __m512& o2) {
|
| 143 |
+
cvtbf16_fp32(a, o1, o2);
|
| 144 |
+
}
|
| 145 |
+
template <> inline void cvt_to_fp32<Half>(const __m512i& a, __m512& o1, __m512& o2) {
|
| 146 |
+
cvtfp16_fp32(a, o1, o2);
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
template <typename T, bool is_compare_op = false,
|
| 150 |
+
typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
|
| 151 |
+
inline __m512i cvt_from_fp32(const __m512& a, const __m512& b);
|
| 152 |
+
template <> inline __m512i cvt_from_fp32<BFloat16, false>(const __m512& a, const __m512& b) {
|
| 153 |
+
return cvtfp32_bf16(a, b);
|
| 154 |
+
}
|
| 155 |
+
template <> inline __m512i cvt_from_fp32<BFloat16, true>(const __m512& a, const __m512& b) {
|
| 156 |
+
return merge_compare_result(a, b);
|
| 157 |
+
}
|
| 158 |
+
template <> inline __m512i cvt_from_fp32<Half, false>(const __m512& a, const __m512& b) {
|
| 159 |
+
return cvtfp32_fp16(a, b);
|
| 160 |
+
}
|
| 161 |
+
template <> inline __m512i cvt_from_fp32<Half, true>(const __m512& a, const __m512& b) {
|
| 162 |
+
return cvtfp32_fp16(a, b);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
template <typename T>
|
| 166 |
+
class Vectorized16 {
|
| 167 |
+
static_assert(
|
| 168 |
+
is_reduced_floating_point_v<T>,
|
| 169 |
+
"Support only float16 and bfloat16.");
|
| 170 |
+
private:
|
| 171 |
+
__m512i values;
|
| 172 |
+
public:
|
| 173 |
+
using value_type = uint16_t;
|
| 174 |
+
using size_type = int;
|
| 175 |
+
static constexpr size_type size() {
|
| 176 |
+
return 32;
|
| 177 |
+
}
|
| 178 |
+
Vectorized16() {}
|
| 179 |
+
Vectorized16(__m512i v) : values(v) {}
|
| 180 |
+
Vectorized16(T val) {
|
| 181 |
+
value_type uw = val.x;
|
| 182 |
+
values = _mm512_set1_epi16(uw);
|
| 183 |
+
}
|
| 184 |
+
Vectorized16(T val1, T val2, T val3, T val4,
|
| 185 |
+
T val5, T val6, T val7, T val8,
|
| 186 |
+
T val9, T val10, T val11, T val12,
|
| 187 |
+
T val13, T val14, T val15, T val16,
|
| 188 |
+
T val17, T val18, T val19, T val20,
|
| 189 |
+
T val21, T val22, T val23, T val24,
|
| 190 |
+
T val25, T val26, T val27, T val28,
|
| 191 |
+
T val29, T val30, T val31, T val32) {
|
| 192 |
+
values = _mm512_set_epi16(
|
| 193 |
+
val32.x, val31.x, val30.x, val29.x, val28.x, val27.x, val26.x, val25.x,
|
| 194 |
+
val24.x, val23.x, val22.x, val21.x, val20.x, val19.x, val18.x, val17.x,
|
| 195 |
+
val16.x, val15.x, val14.x, val13.x, val12.x, val11.x, val10.x, val9.x,
|
| 196 |
+
val8.x, val7.x, val6.x, val5.x, val4.x, val3.x, val2.x, val1.x);
|
| 197 |
+
}
|
| 198 |
+
operator __m512i() const {
|
| 199 |
+
return values;
|
| 200 |
+
}
|
| 201 |
+
T& operator[](int idx) = delete;
|
| 202 |
+
const T& operator[](int idx) const = delete;
|
| 203 |
+
int zero_mask() const {
|
| 204 |
+
// returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
|
| 205 |
+
return _mm512_cmpeq_epi16_mask(values, _mm512_set1_epi16(0));
|
| 206 |
+
}
|
| 207 |
+
static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
|
| 208 |
+
if (count == size())
|
| 209 |
+
return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
|
| 210 |
+
|
| 211 |
+
__mmask32 mask = (1ULL << count) - 1;
|
| 212 |
+
return _mm512_maskz_loadu_epi16(mask, ptr);
|
| 213 |
+
}
|
| 214 |
+
void store(void* ptr, int count = size()) const {
|
| 215 |
+
if (count == size()) {
|
| 216 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
|
| 217 |
+
} else if (count > 0) {
|
| 218 |
+
__mmask32 mask = (1ULL << count) - 1;
|
| 219 |
+
_mm512_mask_storeu_epi16(ptr, mask, values);
|
| 220 |
+
}
|
| 221 |
+
}
|
| 222 |
+
template <int64_t mask>
|
| 223 |
+
static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
|
| 224 |
+
__at_align__ int16_t tmp_values[size()];
|
| 225 |
+
a.store(tmp_values);
|
| 226 |
+
if (mask & 0x01)
|
| 227 |
+
tmp_values[0] = b.values[31];
|
| 228 |
+
if (mask & 0x02)
|
| 229 |
+
tmp_values[1] = b.values[30];
|
| 230 |
+
if (mask & 0x04)
|
| 231 |
+
tmp_values[2] = b.values[29];
|
| 232 |
+
if (mask & 0x08)
|
| 233 |
+
tmp_values[3] = b.values[28];
|
| 234 |
+
if (mask & 0x10)
|
| 235 |
+
tmp_values[4] = b.values[27];
|
| 236 |
+
if (mask & 0x20)
|
| 237 |
+
tmp_values[5] = b.values[26];
|
| 238 |
+
if (mask & 0x40)
|
| 239 |
+
tmp_values[6] = b.values[25];
|
| 240 |
+
if (mask & 0x80)
|
| 241 |
+
tmp_values[7] = b.values[24];
|
| 242 |
+
if (mask & 0x100)
|
| 243 |
+
tmp_values[8] = b.values[23];
|
| 244 |
+
if (mask & 0x200)
|
| 245 |
+
tmp_values[9] = b.values[22];
|
| 246 |
+
if (mask & 0x400)
|
| 247 |
+
tmp_values[10] = b.values[21];
|
| 248 |
+
if (mask & 0x800)
|
| 249 |
+
tmp_values[11] = b.values[20];
|
| 250 |
+
if (mask & 0x1000)
|
| 251 |
+
tmp_values[12] = b.values[19];
|
| 252 |
+
if (mask & 0x2000)
|
| 253 |
+
tmp_values[13] = b.values[18];
|
| 254 |
+
if (mask & 0x4000)
|
| 255 |
+
tmp_values[14] = b.values[17];
|
| 256 |
+
if (mask & 0x8000)
|
| 257 |
+
tmp_values[15] = b.values[16];
|
| 258 |
+
if (mask & 0x10000)
|
| 259 |
+
tmp_values[16] = b.values[15];
|
| 260 |
+
if (mask & 0x20000)
|
| 261 |
+
tmp_values[17] = b.values[14];
|
| 262 |
+
if (mask & 0x40000)
|
| 263 |
+
tmp_values[18] = b.values[13];
|
| 264 |
+
if (mask & 0x80000)
|
| 265 |
+
tmp_values[19] = b.values[12];
|
| 266 |
+
if (mask & 0x100000)
|
| 267 |
+
tmp_values[20] = b.values[11];
|
| 268 |
+
if (mask & 0x200000)
|
| 269 |
+
tmp_values[21] = b.values[10];
|
| 270 |
+
if (mask & 0x400000)
|
| 271 |
+
tmp_values[22] = b.values[9];
|
| 272 |
+
if (mask & 0x800000)
|
| 273 |
+
tmp_values[23] = b.values[8];
|
| 274 |
+
if (mask & 0x1000000)
|
| 275 |
+
tmp_values[24] = b.values[7];
|
| 276 |
+
if (mask & 0x2000000)
|
| 277 |
+
tmp_values[25] = b.values[6];
|
| 278 |
+
if (mask & 0x4000000)
|
| 279 |
+
tmp_values[26] = b.values[5];
|
| 280 |
+
if (mask & 0x8000000)
|
| 281 |
+
tmp_values[27] = b.values[4];
|
| 282 |
+
if (mask & 0x10000000)
|
| 283 |
+
tmp_values[28] = b.values[3];
|
| 284 |
+
if (mask & 0x20000000)
|
| 285 |
+
tmp_values[29] = b.values[2];
|
| 286 |
+
if (mask & 0x40000000)
|
| 287 |
+
tmp_values[30] = b.values[1];
|
| 288 |
+
if (mask & 0x80000000)
|
| 289 |
+
tmp_values[31] = b.values[0];
|
| 290 |
+
return loadu(tmp_values);
|
| 291 |
+
}
|
| 292 |
+
static Vectorized<T> blendv(const Vectorized<T>& a,
|
| 293 |
+
const Vectorized<T>& b, const Vectorized<T>& mask) {
|
| 294 |
+
auto all_ones = _mm512_set1_epi16(0xFFFF);
|
| 295 |
+
auto mask_ = _mm512_cmp_epi16_mask(mask, all_ones, _MM_CMPINT_EQ);
|
| 296 |
+
return _mm512_mask_blend_epi16(mask_, a.values, b.values);
|
| 297 |
+
}
|
| 298 |
+
template<typename step_t>
|
| 299 |
+
static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
|
| 300 |
+
return Vectorized<T>(
|
| 301 |
+
base, base + step, base + 2 * step, base + 3 * step,
|
| 302 |
+
base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
|
| 303 |
+
base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
|
| 304 |
+
base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
|
| 305 |
+
base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
|
| 306 |
+
base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
|
| 307 |
+
base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
|
| 308 |
+
base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
|
| 309 |
+
}
|
| 310 |
+
static Vectorized<T> set(const Vectorized<T>& a,
|
| 311 |
+
const Vectorized<T>& b, int64_t count = size()) {
|
| 312 |
+
switch (count) {
|
| 313 |
+
case 0:
|
| 314 |
+
return a;
|
| 315 |
+
case 1:
|
| 316 |
+
return blend<1>(a, b);
|
| 317 |
+
case 2:
|
| 318 |
+
return blend<3>(a, b);
|
| 319 |
+
case 3:
|
| 320 |
+
return blend<7>(a, b);
|
| 321 |
+
case 4:
|
| 322 |
+
return blend<15>(a, b);
|
| 323 |
+
case 5:
|
| 324 |
+
return blend<31>(a, b);
|
| 325 |
+
case 6:
|
| 326 |
+
return blend<63>(a, b);
|
| 327 |
+
case 7:
|
| 328 |
+
return blend<127>(a, b);
|
| 329 |
+
case 8:
|
| 330 |
+
return blend<255>(a, b);
|
| 331 |
+
case 9:
|
| 332 |
+
return blend<511>(a, b);
|
| 333 |
+
case 10:
|
| 334 |
+
return blend<1023>(a, b);
|
| 335 |
+
case 11:
|
| 336 |
+
return blend<2047>(a, b);
|
| 337 |
+
case 12:
|
| 338 |
+
return blend<4095>(a, b);
|
| 339 |
+
case 13:
|
| 340 |
+
return blend<8191>(a, b);
|
| 341 |
+
case 14:
|
| 342 |
+
return blend<16383>(a, b);
|
| 343 |
+
case 15:
|
| 344 |
+
return blend<32767>(a, b);
|
| 345 |
+
case 16:
|
| 346 |
+
return blend<65535>(a, b);
|
| 347 |
+
case 17:
|
| 348 |
+
return blend<131071>(a, b);
|
| 349 |
+
case 18:
|
| 350 |
+
return blend<262143>(a, b);
|
| 351 |
+
case 19:
|
| 352 |
+
return blend<524287>(a, b);
|
| 353 |
+
case 20:
|
| 354 |
+
return blend<1048575>(a, b);
|
| 355 |
+
case 21:
|
| 356 |
+
return blend<2097151>(a, b);
|
| 357 |
+
case 22:
|
| 358 |
+
return blend<4194303>(a, b);
|
| 359 |
+
case 23:
|
| 360 |
+
return blend<8388607>(a, b);
|
| 361 |
+
case 24:
|
| 362 |
+
return blend<16777215>(a, b);
|
| 363 |
+
case 25:
|
| 364 |
+
return blend<33554431>(a, b);
|
| 365 |
+
case 26:
|
| 366 |
+
return blend<67108863>(a, b);
|
| 367 |
+
case 27:
|
| 368 |
+
return blend<134217727>(a, b);
|
| 369 |
+
case 28:
|
| 370 |
+
return blend<268435455>(a, b);
|
| 371 |
+
case 29:
|
| 372 |
+
return blend<536870911>(a, b);
|
| 373 |
+
case 30:
|
| 374 |
+
return blend<1073741823>(a, b);
|
| 375 |
+
case 31:
|
| 376 |
+
return blend<2147483647>(a, b);
|
| 377 |
+
}
|
| 378 |
+
return b;
|
| 379 |
+
}
|
| 380 |
+
#pragma clang diagnostic push
|
| 381 |
+
#pragma clang diagnostic ignored "-Wignored-qualifiers"
|
| 382 |
+
|
| 383 |
+
Vectorized<T> map(SLEEF_CONST __m512 (*SLEEF_CONST_OLD vop)(__m512)) const {
|
| 384 |
+
__m512 lo, hi;
|
| 385 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 386 |
+
const auto o1 = vop(lo);
|
| 387 |
+
const auto o2 = vop(hi);
|
| 388 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 389 |
+
}
|
| 390 |
+
Vectorized<T> isnan() const {
|
| 391 |
+
__m512 lo, hi;
|
| 392 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 393 |
+
__mmask16 lo_mask, hi_mask;
|
| 394 |
+
__m512 zero = _mm512_set1_ps(0.0);
|
| 395 |
+
__m512i zeroi = _mm512_castps_si512(zero);
|
| 396 |
+
lo_mask = _mm512_cmp_ps_mask(lo, zero, _CMP_UNORD_Q);
|
| 397 |
+
lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, lo_mask, 0xFFFF'FFFF));
|
| 398 |
+
hi_mask = _mm512_cmp_ps_mask(hi, zero, _CMP_UNORD_Q);
|
| 399 |
+
hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, hi_mask, 0xFFFF'FFFF));
|
| 400 |
+
return merge_compare_result(lo, hi);
|
| 401 |
+
}
|
| 402 |
+
#pragma clang diagnostic pop
|
| 403 |
+
Vectorized<T> abs() const {
|
| 404 |
+
return _mm512_andnot_si512(_mm512_set1_epi16(0x8000), values);
|
| 405 |
+
}
|
| 406 |
+
Vectorized<T> angle() const {
|
| 407 |
+
__m512 lo, hi;
|
| 408 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 409 |
+
auto angle_lambda = [](__m512 values) {
|
| 410 |
+
const auto zero_vec = _mm512_set1_ps(0.f);
|
| 411 |
+
const auto nan_vec = _mm512_set1_ps(NAN);
|
| 412 |
+
const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
|
| 413 |
+
const auto non_nan_mask_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
|
| 414 |
+
not_nan_mask, 0xFFFFFFFF);
|
| 415 |
+
const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(non_nan_mask_vec),
|
| 416 |
+
zero_vec, _CMP_EQ_OQ);
|
| 417 |
+
const auto pi = _mm512_set1_ps(c10::pi<float>);
|
| 418 |
+
|
| 419 |
+
const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
|
| 420 |
+
auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
|
| 421 |
+
angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
|
| 422 |
+
return angle;
|
| 423 |
+
};
|
| 424 |
+
auto o1 = angle_lambda(lo);
|
| 425 |
+
auto o2 = angle_lambda(hi);
|
| 426 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 427 |
+
}
|
| 428 |
+
Vectorized<T> real() const {
|
| 429 |
+
return *this;
|
| 430 |
+
}
|
| 431 |
+
Vectorized<T> imag() const {
|
| 432 |
+
return _mm512_set1_epi16(0);
|
| 433 |
+
}
|
| 434 |
+
Vectorized<T> conj() const {
|
| 435 |
+
return *this;
|
| 436 |
+
}
|
| 437 |
+
Vectorized<T> acos() const {
|
| 438 |
+
return map(Sleef_acosf16_u10);
|
| 439 |
+
}
|
| 440 |
+
Vectorized<T> acosh() const {
|
| 441 |
+
return map(Sleef_acoshf16_u10);
|
| 442 |
+
}
|
| 443 |
+
Vectorized<T> asin() const {
|
| 444 |
+
return map(Sleef_asinf16_u10);
|
| 445 |
+
}
|
| 446 |
+
Vectorized<T> atan() const {
|
| 447 |
+
return map(Sleef_atanf16_u10);
|
| 448 |
+
}
|
| 449 |
+
Vectorized<T> atanh() const {
|
| 450 |
+
return map(Sleef_atanhf16_u10);
|
| 451 |
+
}
|
| 452 |
+
Vectorized<T> atan2(const Vectorized<T> &b) const {
|
| 453 |
+
__m512 lo, hi;
|
| 454 |
+
__m512 b1, b2;
|
| 455 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 456 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 457 |
+
auto o1 = Sleef_atan2f16_u10(lo, b1);
|
| 458 |
+
auto o2 = Sleef_atan2f16_u10(hi, b2);
|
| 459 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 460 |
+
}
|
| 461 |
+
Vectorized<T> copysign(const Vectorized<T> &sign) const {
|
| 462 |
+
// copy sign bit (0x8000) from sign and remaining bits from values
|
| 463 |
+
__m512i mask_value = _mm512_set1_epi32(~0x80008000);
|
| 464 |
+
__m512i mask_signbit = _mm512_set1_epi32(0x80008000);
|
| 465 |
+
return Vectorized<T>(
|
| 466 |
+
_mm512_or_si512(
|
| 467 |
+
_mm512_and_si512(values, mask_value),
|
| 468 |
+
_mm512_and_si512(sign, mask_signbit)));
|
| 469 |
+
}
|
| 470 |
+
Vectorized<T> erf() const {
|
| 471 |
+
return map(Sleef_erff16_u10);
|
| 472 |
+
}
|
| 473 |
+
Vectorized<T> erfc() const {
|
| 474 |
+
return map(Sleef_erfcf16_u15);
|
| 475 |
+
}
|
| 476 |
+
Vectorized<T> erfinv() const {
|
| 477 |
+
__m512 lo, hi;
|
| 478 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 479 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 480 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 481 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 482 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
| 483 |
+
tmp1[i] = calc_erfinv(tmp1[i]);
|
| 484 |
+
tmp2[i] = calc_erfinv(tmp2[i]);
|
| 485 |
+
}
|
| 486 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 487 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 488 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 489 |
+
}
|
| 490 |
+
Vectorized<T> exp() const {
|
| 491 |
+
return map(Sleef_expf16_u10);
|
| 492 |
+
}
|
| 493 |
+
Vectorized<T> exp2() const {
|
| 494 |
+
return map(Sleef_exp2f16_u10);
|
| 495 |
+
}
|
| 496 |
+
Vectorized<T> expm1() const {
|
| 497 |
+
return map(Sleef_expm1f16_u10);
|
| 498 |
+
}
|
| 499 |
+
Vectorized<T> exp_u20() const {
|
| 500 |
+
return exp();
|
| 501 |
+
}
|
| 502 |
+
Vectorized<T> fmod(const Vectorized<T> & q) const {
|
| 503 |
+
__m512 x_lo, x_hi;
|
| 504 |
+
cvt_to_fp32<T>(values, x_lo, x_hi);
|
| 505 |
+
__m512 q_lo, q_hi;
|
| 506 |
+
cvtbf16_fp32(q.values, q_lo, q_hi);
|
| 507 |
+
auto o1 = Sleef_fmodf16(x_lo, q_lo);
|
| 508 |
+
auto o2 = Sleef_fmodf16(x_hi, q_hi);
|
| 509 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 510 |
+
}
|
| 511 |
+
Vectorized<T> hypot(const Vectorized<T> &b) const {
|
| 512 |
+
__m512 lo, hi;
|
| 513 |
+
__m512 b1, b2;
|
| 514 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 515 |
+
cvt_to_fp32<T>(b.values, b1, b2);
|
| 516 |
+
auto o1 = Sleef_hypotf16_u05(lo, b1);
|
| 517 |
+
auto o2 = Sleef_hypotf16_u05(hi, b2);
|
| 518 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 519 |
+
}
|
| 520 |
+
Vectorized<T> i0() const {
|
| 521 |
+
__m512 lo, hi;
|
| 522 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 523 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 524 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 525 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 526 |
+
for (int64_t i = 0; i < size() / 2; i++) {
|
| 527 |
+
tmp1[i] = calc_i0(tmp1[i]);
|
| 528 |
+
tmp2[i] = calc_i0(tmp2[i]);
|
| 529 |
+
}
|
| 530 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 531 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 532 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 533 |
+
}
|
| 534 |
+
Vectorized<T> i0e() const {
|
| 535 |
+
__m512 lo, hi;
|
| 536 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 537 |
+
constexpr auto sz = size();
|
| 538 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
| 539 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 540 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 541 |
+
|
| 542 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
| 543 |
+
tmp1[i] = calc_i0e(tmp1[i]);
|
| 544 |
+
tmp2[i] = calc_i0e(tmp2[i]);
|
| 545 |
+
}
|
| 546 |
+
const auto o1 = _mm512_loadu_ps(tmp1);
|
| 547 |
+
const auto o2 = _mm512_loadu_ps(tmp2);
|
| 548 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 549 |
+
}
|
| 550 |
+
Vectorized<T> digamma() const {
|
| 551 |
+
__m512 lo, hi;
|
| 552 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 553 |
+
constexpr auto sz = size();
|
| 554 |
+
__at_align__ float tmp1[sz / 2], tmp2[sz / 2];
|
| 555 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 556 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 557 |
+
|
| 558 |
+
for (auto i = decltype(sz){0}; i < sz / 2; i++) {
|
| 559 |
+
tmp1[i] = calc_digamma(tmp1[i]);
|
| 560 |
+
tmp2[i] = calc_digamma(tmp2[i]);
|
| 561 |
+
}
|
| 562 |
+
const auto o1 = _mm512_loadu_ps(tmp1);
|
| 563 |
+
const auto o2 = _mm512_loadu_ps(tmp2);
|
| 564 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 565 |
+
}
|
| 566 |
+
Vectorized<T> igamma(const Vectorized<T> &x) const {
|
| 567 |
+
__m512 lo, hi;
|
| 568 |
+
__m512 xlo, xhi;
|
| 569 |
+
cvt_to_fp32<T>(values, lo, hi);
|
| 570 |
+
cvt_to_fp32<T>(x.values, xlo, xhi);
|
| 571 |
+
__at_align__ float tmp1[size() / 2], tmp2[size() / 2];
|
| 572 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
|
| 573 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
|
| 574 |
+
__at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
|
| 575 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
|
| 576 |
+
_mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
|
| 577 |
+
for (int64_t i = 0; i < size() / 2; ++i) {
|
| 578 |
+
tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
|
| 579 |
+
tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
|
| 580 |
+
}
|
| 581 |
+
auto o1 = _mm512_loadu_ps(tmp1);
|
| 582 |
+
auto o2 = _mm512_loadu_ps(tmp2);
|
| 583 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 584 |
+
}
|
| 585 |
+
|
| 586 |
+
// Regularized upper incomplete gamma, elementwise: out[i] = igammac(this[i], x[i]).
// Same widen -> scalar loop -> narrow scheme as igamma, delegating each lane
// to calc_igammac.
Vectorized<T> igammac(const Vectorized<T> &x) const {
  __m512 self_lo, self_hi;
  __m512 arg_lo, arg_hi;
  cvt_to_fp32<T>(values, self_lo, self_hi);
  cvt_to_fp32<T>(x.values, arg_lo, arg_hi);
  // Spill both halves of each operand to aligned scratch for scalar math.
  __at_align__ float buf_lo[size() / 2], buf_hi[size() / 2];
  _mm512_storeu_ps(reinterpret_cast<float*>(buf_lo), self_lo);
  _mm512_storeu_ps(reinterpret_cast<float*>(buf_hi), self_hi);
  __at_align__ float argbuf_lo[size() / 2], argbuf_hi[size() / 2];
  _mm512_storeu_ps(reinterpret_cast<float*>(argbuf_lo), arg_lo);
  _mm512_storeu_ps(reinterpret_cast<float*>(argbuf_hi), arg_hi);
  for (int64_t i = 0; i < size() / 2; ++i) {
    buf_lo[i] = calc_igammac(buf_lo[i], argbuf_lo[i]);
    buf_hi[i] = calc_igammac(buf_hi[i], argbuf_hi[i]);
  }
  const auto res_lo = _mm512_loadu_ps(buf_lo);
  const auto res_hi = _mm512_loadu_ps(buf_hi);
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 605 |
+
// Elementwise natural logarithm (Sleef, 1.0-ULP variant).
Vectorized<T> log() const {
  return map(Sleef_logf16_u10);
}
// Elementwise base-2 logarithm (Sleef, 1.0-ULP variant).
Vectorized<T> log2() const {
  return map(Sleef_log2f16_u10);
}
// Elementwise base-10 logarithm (Sleef, 1.0-ULP variant).
Vectorized<T> log10() const {
  return map(Sleef_log10f16_u10);
}
// Elementwise log(1 + x) (Sleef, 1.0-ULP variant).
Vectorized<T> log1p() const {
  return map(Sleef_log1pf16_u10);
}
// Elementwise sine (Sleef, 1.0-ULP variant).
Vectorized<T> sin() const {
  return map(Sleef_sinf16_u10);
}
// Elementwise hyperbolic sine (Sleef, 1.0-ULP variant).
Vectorized<T> sinh() const {
  return map(Sleef_sinhf16_u10);
}
// Elementwise cosine (Sleef, 1.0-ULP variant).
Vectorized<T> cos() const {
  return map(Sleef_cosf16_u10);
}
// Elementwise hyperbolic cosine (Sleef, 1.0-ULP variant).
Vectorized<T> cosh() const {
  return map(Sleef_coshf16_u10);
}
|
| 629 |
+
// Elementwise round toward +infinity, computed in fp32.
Vectorized<T> ceil() const {
  __m512 lo_f32, hi_f32;
  cvt_to_fp32<T>(values, lo_f32, hi_f32);
  const auto res_lo = _mm512_ceil_ps(lo_f32);
  const auto res_hi = _mm512_ceil_ps(hi_f32);
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 636 |
+
// Elementwise round toward -infinity, computed in fp32.
Vectorized<T> floor() const {
  __m512 lo_f32, hi_f32;
  cvt_to_fp32<T>(values, lo_f32, hi_f32);
  const auto res_lo = _mm512_floor_ps(lo_f32);
  const auto res_hi = _mm512_floor_ps(hi_f32);
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 643 |
+
// Elementwise negation: flip the sign bit of each 16-bit lane directly,
// no fp32 round-trip needed.
Vectorized<T> neg() const {
  const auto sign_bits = _mm512_set1_epi16(0x8000);
  return _mm512_xor_si512(values, sign_bits);
}
|
| 646 |
+
// Elementwise round-half-to-even (current rounding mode NEAREST_INT,
// exceptions suppressed), computed in fp32.
Vectorized<T> round() const {
  __m512 lo_f32, hi_f32;
  cvt_to_fp32<T>(values, lo_f32, hi_f32);
  const auto res_lo = _mm512_roundscale_ps(lo_f32, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  const auto res_hi = _mm512_roundscale_ps(hi_f32, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 653 |
+
// Elementwise tangent (Sleef, 1.0-ULP variant).
Vectorized<T> tan() const {
  return map(Sleef_tanf16_u10);
}
// Elementwise hyperbolic tangent (Sleef, 1.0-ULP variant).
Vectorized<T> tanh() const {
  return map(Sleef_tanhf16_u10);
}
|
| 659 |
+
// Elementwise truncation toward zero (exceptions suppressed), computed in fp32.
Vectorized<T> trunc() const {
  __m512 lo_f32, hi_f32;
  cvt_to_fp32<T>(values, lo_f32, hi_f32);
  const auto res_lo = _mm512_roundscale_ps(lo_f32, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
  const auto res_hi = _mm512_roundscale_ps(hi_f32, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 666 |
+
// Elementwise log-gamma (Sleef, 1.0-ULP variant).
Vectorized<T> lgamma() const {
  return map(Sleef_lgammaf16_u10);
}
|
| 669 |
+
// Elementwise square root, computed in fp32.
Vectorized<T> sqrt() const {
  __m512 lo_f32, hi_f32;
  cvt_to_fp32<T>(values, lo_f32, hi_f32);
  const auto res_lo = _mm512_sqrt_ps(lo_f32);
  const auto res_hi = _mm512_sqrt_ps(hi_f32);
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 676 |
+
// Elementwise reciprocal via a full-precision fp32 divide (not the fast
// approximate rcp instruction).
Vectorized<T> reciprocal() const {
  __m512 lo_f32, hi_f32;
  cvt_to_fp32<T>(values, lo_f32, hi_f32);
  const auto one = _mm512_set1_ps(1);
  const auto res_lo = _mm512_div_ps(one, lo_f32);
  const auto res_hi = _mm512_div_ps(one, hi_f32);
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 684 |
+
// Elementwise 1/sqrt(x) via full-precision fp32 sqrt + divide (not the
// approximate rsqrt instruction).
Vectorized<T> rsqrt() const {
  __m512 lo_f32, hi_f32;
  cvt_to_fp32<T>(values, lo_f32, hi_f32);
  const auto one = _mm512_set1_ps(1);
  const auto res_lo = _mm512_div_ps(one, _mm512_sqrt_ps(lo_f32));
  const auto res_hi = _mm512_div_ps(one, _mm512_sqrt_ps(hi_f32));
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 692 |
+
// Elementwise power: out[i] = this[i] ** b[i], computed in fp32 via Sleef.
Vectorized<T> pow(const Vectorized<T> &b) const {
  __m512 base_lo, base_hi;
  __m512 exp_lo, exp_hi;
  cvt_to_fp32<T>(values, base_lo, base_hi);
  cvt_to_fp32<T>(b.values, exp_lo, exp_hi);
  const auto res_lo = Sleef_powf16_u10(base_lo, exp_lo);
  const auto res_hi = Sleef_powf16_u10(base_hi, exp_hi);
  return cvt_from_fp32<T>(res_lo, res_hi);
}
|
| 701 |
+
private:
// Shared skeleton for the comparison operators below: widen both operands
// to fp32, apply `op` to each half, and narrow the resulting lane masks
// back to T. The is_compare_op=true conversion keeps all-ones mask lanes
// intact through the down-conversion.
template<typename Op>
Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
  __m512 lhs_lo, lhs_hi;
  __m512 rhs_lo, rhs_hi;
  cvt_to_fp32<T>(values, lhs_lo, lhs_hi);
  cvt_to_fp32<T>(b.values, rhs_lo, rhs_hi);
  const auto res_lo = op(lhs_lo, rhs_lo);
  const auto res_hi = op(lhs_hi, rhs_hi);
  return cvt_from_fp32<T, /*is_compare_op*/true>(res_lo, res_hi);
}
|
| 712 |
+
|
| 713 |
+
public:
|
| 714 |
+
Vectorized<T> inline operator>(const Vectorized<T>& other) const {
|
| 715 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 716 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 717 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GT_OQ);
|
| 718 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 719 |
+
});
|
| 720 |
+
}
|
| 721 |
+
Vectorized<T> inline operator<(const Vectorized<T>& other) const {
|
| 722 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 723 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 724 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LT_OQ);
|
| 725 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 726 |
+
});
|
| 727 |
+
}
|
| 728 |
+
Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
|
| 729 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 730 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 731 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GE_OQ);
|
| 732 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 733 |
+
});
|
| 734 |
+
}
|
| 735 |
+
Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
|
| 736 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 737 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 738 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LE_OQ);
|
| 739 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 740 |
+
});
|
| 741 |
+
}
|
| 742 |
+
Vectorized<T> inline operator==(const Vectorized<T>& other) const {
|
| 743 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 744 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 745 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_EQ_OQ);
|
| 746 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 747 |
+
});
|
| 748 |
+
}
|
| 749 |
+
Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
|
| 750 |
+
return binary_compare(other, [](__m512 x, __m512 y) {
|
| 751 |
+
auto zero_vec = _mm512_set1_epi32(0);
|
| 752 |
+
auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_NEQ_UQ);
|
| 753 |
+
return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
|
| 754 |
+
});
|
| 755 |
+
}
|
| 756 |
+
};
|
| 757 |
+
|
| 758 |
+
template<typename T, typename Op>
|
| 759 |
+
static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
|
| 760 |
+
__m512 a_lo, a_hi;
|
| 761 |
+
__m512 b_lo, b_hi;
|
| 762 |
+
cvt_to_fp32<T>(__m512i(a), a_lo, a_hi);
|
| 763 |
+
cvt_to_fp32<T>(__m512i(b), b_lo, b_hi);
|
| 764 |
+
auto o1 = op(a_lo, b_lo);
|
| 765 |
+
auto o2 = op(a_hi, b_hi);
|
| 766 |
+
return cvt_from_fp32<T>(o1, o2);
|
| 767 |
+
}
|
| 768 |
+
|
| 769 |
+
template <>
|
| 770 |
+
class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
|
| 771 |
+
public:
|
| 772 |
+
using Vectorized16::Vectorized16;
|
| 773 |
+
|
| 774 |
+
Vectorized<BFloat16> frac() const;
|
| 775 |
+
|
| 776 |
+
Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
|
| 777 |
+
Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
|
| 778 |
+
Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
|
| 779 |
+
Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
|
| 780 |
+
Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
|
| 781 |
+
Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
|
| 782 |
+
};
|
| 783 |
+
|
| 784 |
+
// Arithmetic on BFloat16 vectors is performed in fp32 and rounded back.
Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); });
}
Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); });
}
Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); });
}
Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
}
|
| 796 |
+
// Bitwise ops act on the raw 16-bit lane payloads; no fp32 conversion.
Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_and_si512(a, b);
}
Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_or_si512(a, b);
}
Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
  return _mm512_xor_si512(a, b);
}
|
| 805 |
+
|
| 806 |
+
// The eq/ne/gt/ge/lt/le family converts the all-ones comparison masks from
// the underlying operators into numeric 1.0/0.0 lanes by masking with 1.0f
// (all-ones AND bf16(1.0) == bf16(1.0); zero AND anything == 0).
inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
  const auto one = Vectorized<BFloat16>(1.0f);
  return (*this == other) & one;
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
  const auto one = Vectorized<BFloat16>(1.0f);
  return (*this != other) & one;
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
  const auto one = Vectorized<BFloat16>(1.0f);
  return (*this > other) & one;
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
  const auto one = Vectorized<BFloat16>(1.0f);
  return (*this >= other) & one;
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
  const auto one = Vectorized<BFloat16>(1.0f);
  return (*this < other) & one;
}

inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
  const auto one = Vectorized<BFloat16>(1.0f);
  return (*this <= other) & one;
}
|
| 829 |
+
|
| 830 |
+
// frac. Implement this here so we can use subtraction
|
| 831 |
+
inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
|
| 832 |
+
return *this - this->trunc();
|
| 833 |
+
}
|
| 834 |
+
|
| 835 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 836 |
+
// either input is a NaN.
|
| 837 |
+
template <>
|
| 838 |
+
Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 839 |
+
__m512 a_lo, a_hi;
|
| 840 |
+
__m512 b_lo, b_hi;
|
| 841 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 842 |
+
cvtbf16_fp32(__m512i(b), b_lo, b_hi);
|
| 843 |
+
auto max_lo = _mm512_max_ps(a_lo, b_lo);
|
| 844 |
+
auto max_hi = _mm512_max_ps(a_hi, b_hi);
|
| 845 |
+
auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
|
| 846 |
+
auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
|
| 847 |
+
auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
|
| 848 |
+
auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
|
| 849 |
+
// Exploit the fact that all-ones is a NaN.
|
| 850 |
+
auto o1 = _mm512_or_ps(max_lo, nan_lo);
|
| 851 |
+
auto o2 = _mm512_or_ps(max_hi, nan_hi);
|
| 852 |
+
return cvtfp32_bf16(o1, o2);
|
| 853 |
+
}
|
| 854 |
+
|
| 855 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
| 856 |
+
// either input is a NaN.
|
| 857 |
+
template <>
|
| 858 |
+
Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
|
| 859 |
+
__m512 a_lo, a_hi;
|
| 860 |
+
__m512 b_lo, b_hi;
|
| 861 |
+
__m512i zero_vec = _mm512_set1_epi32(0);
|
| 862 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 863 |
+
cvtbf16_fp32(__m512i(b), b_lo, b_hi);
|
| 864 |
+
auto min_lo = _mm512_min_ps(a_lo, b_lo);
|
| 865 |
+
auto min_hi = _mm512_min_ps(a_hi, b_hi);
|
| 866 |
+
auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
|
| 867 |
+
auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
|
| 868 |
+
auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
|
| 869 |
+
0xFFFFFFFF));
|
| 870 |
+
auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
|
| 871 |
+
0xFFFFFFFF));
|
| 872 |
+
// Exploit the fact that all-ones is a NaN.
|
| 873 |
+
auto o1 = _mm512_or_ps(min_lo, nan_lo);
|
| 874 |
+
auto o2 = _mm512_or_ps(min_hi, nan_hi);
|
| 875 |
+
return cvtfp32_bf16(o1, o2);
|
| 876 |
+
}
|
| 877 |
+
|
| 878 |
+
template <>
|
| 879 |
+
Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
|
| 880 |
+
const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
|
| 881 |
+
__m512 a_lo, a_hi;
|
| 882 |
+
__m512 min_lo, min_hi;
|
| 883 |
+
__m512 max_lo, max_hi;
|
| 884 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 885 |
+
cvtbf16_fp32(__m512i(min), min_lo, min_hi);
|
| 886 |
+
cvtbf16_fp32(__m512i(max), max_lo, max_hi);
|
| 887 |
+
auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
|
| 888 |
+
auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
|
| 889 |
+
return cvtfp32_bf16(o1, o2);
|
| 890 |
+
}
|
| 891 |
+
|
| 892 |
+
template <>
|
| 893 |
+
Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
|
| 894 |
+
__m512 a_lo, a_hi;
|
| 895 |
+
__m512 max_lo, max_hi;
|
| 896 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 897 |
+
cvtbf16_fp32(__m512i(max), max_lo, max_hi);
|
| 898 |
+
auto o1 = _mm512_min_ps(max_lo, a_lo);
|
| 899 |
+
auto o2 = _mm512_min_ps(max_hi, a_hi);
|
| 900 |
+
return cvtfp32_bf16(o1, o2);
|
| 901 |
+
}
|
| 902 |
+
|
| 903 |
+
template <>
|
| 904 |
+
Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
|
| 905 |
+
__m512 a_lo, a_hi;
|
| 906 |
+
__m512 min_lo, min_hi;
|
| 907 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 908 |
+
cvtbf16_fp32(__m512i(min), min_lo, min_hi);
|
| 909 |
+
auto o1 = _mm512_max_ps(min_lo, a_lo);
|
| 910 |
+
auto o2 = _mm512_max_ps(min_hi, a_hi);
|
| 911 |
+
return cvtfp32_bf16(o1, o2);
|
| 912 |
+
}
|
| 913 |
+
|
| 914 |
+
template <>
|
| 915 |
+
inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
|
| 916 |
+
int64_t i;
|
| 917 |
+
#pragma unroll
|
| 918 |
+
for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
|
| 919 |
+
auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
|
| 920 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
|
| 921 |
+
}
|
| 922 |
+
#pragma unroll
|
| 923 |
+
for (; i < n; i++) {
|
| 924 |
+
dst[i] = src[i];
|
| 925 |
+
}
|
| 926 |
+
}
|
| 927 |
+
|
| 928 |
+
template <>
|
| 929 |
+
inline void convert(const float* src, BFloat16* dst, int64_t n) {
|
| 930 |
+
int64_t i;
|
| 931 |
+
for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
|
| 932 |
+
__m512 a = _mm512_loadu_ps(&src[i]);
|
| 933 |
+
__m512 b = _mm512_loadu_ps(&src[i + 16]);
|
| 934 |
+
|
| 935 |
+
__m512i bf = cvtfp32_bf16(a, b);
|
| 936 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
|
| 937 |
+
}
|
| 938 |
+
for (; i < n; i++) {
|
| 939 |
+
dst[i] = c10::convert<BFloat16>(src[i]);
|
| 940 |
+
}
|
| 941 |
+
}
|
| 942 |
+
|
| 943 |
+
template <>
|
| 944 |
+
inline void convert(const double* src, BFloat16* dst, int64_t n) {
|
| 945 |
+
auto load_float = [](const double *src) -> __m512 {
|
| 946 |
+
// Load one float vector from an array of doubles
|
| 947 |
+
__m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
|
| 948 |
+
__m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
|
| 949 |
+
return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
|
| 950 |
+
};
|
| 951 |
+
|
| 952 |
+
int64_t i;
|
| 953 |
+
for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
|
| 954 |
+
__m512 a = load_float(&src[i]);
|
| 955 |
+
__m512 b = load_float(&src[i + 16]);
|
| 956 |
+
|
| 957 |
+
__m512i bf = cvtfp32_bf16(a, b);
|
| 958 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
|
| 959 |
+
}
|
| 960 |
+
for (; i < n; i++) {
|
| 961 |
+
dst[i] = c10::convert<BFloat16>(src[i]);
|
| 962 |
+
}
|
| 963 |
+
}
|
| 964 |
+
|
| 965 |
+
template <>
|
| 966 |
+
Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
|
| 967 |
+
const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
|
| 968 |
+
__m512 a_lo, a_hi;
|
| 969 |
+
__m512 b_lo, b_hi;
|
| 970 |
+
__m512 c_lo, c_hi;
|
| 971 |
+
cvtbf16_fp32(__m512i(a), a_lo, a_hi);
|
| 972 |
+
cvtbf16_fp32(__m512i(b), b_lo, b_hi);
|
| 973 |
+
cvtbf16_fp32(__m512i(c), c_lo, c_hi);
|
| 974 |
+
auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
|
| 975 |
+
auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
|
| 976 |
+
return cvtfp32_bf16(o1, o2);
|
| 977 |
+
}
|
| 978 |
+
|
| 979 |
+
// Transposes a 16x16 matrix of 16-bit elements held in sixteen 256-bit rows
// t[0..15] into eight 512-bit outputs u[0..7], where u[i] packs transposed
// rows 2*i (low half) and 2*i+1 (high half). Unpack/permute butterfly,
// following the FBGEMM AVX512 transpose.
static inline void _transpose_mxn_half_16_16(__m256i t[], __m512i u[]) {
  __m512i scratch[8];
  // Step 1: pair 256-bit rows into 512-bit registers — row i with row i+4 in
  // the low/high 256-bit halves (rows 8..15 likewise into scratch[4..7]).
#pragma unroll(4)
  for (int i = 0; i < 4; i++) {
    scratch[i] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i]), t[i + 4], 0x01);
    scratch[i + 4] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i + 8]), t[i + 12], 0x01);
  }

  // Step 2: 32-bit interleave of adjacent register pairs (mixes element
  // pairs from two source rows per 32-bit slot).
#pragma unroll(4)
  for (int i = 0; i < 8; i += 2) {
    u[i] = _mm512_unpacklo_epi32(scratch[i], scratch[i + 1]);
    u[i + 1] = _mm512_unpackhi_epi32(scratch[i], scratch[i + 1]);
  }

  // Step 3: 64-bit interleave — after this each register holds 4-row groups
  // of the partially transposed matrix.
  scratch[0] = _mm512_unpacklo_epi64(u[0], u[2]);
  scratch[1] = _mm512_unpackhi_epi64(u[0], u[2]);
  scratch[2] = _mm512_unpacklo_epi64(u[1], u[3]);
  scratch[3] = _mm512_unpackhi_epi64(u[1], u[3]);
  scratch[4] = _mm512_unpacklo_epi64(u[4], u[6]);
  scratch[5] = _mm512_unpackhi_epi64(u[4], u[6]);
  scratch[6] = _mm512_unpacklo_epi64(u[5], u[7]);
  scratch[7] = _mm512_unpackhi_epi64(u[5], u[7]);

  // Step 4: final 16-bit cross-register permutes. The index vectors select
  // interleaved 16-bit lanes from a scratch[i] / scratch[i+4] pair (indices
  // >= 0x20 address the second source operand).
  __m512i const1 = _mm512_set_epi32(
      0x00370035,
      0x00330031,
      0x00270025,
      0x00230021,
      0x00170015,
      0x00130011,
      0x00070005,
      0x00030001,
      0x00360034,
      0x00320030,
      0x00260024,
      0x00220020,
      0x00160014,
      0x00120010,
      0x00060004,
      0x00020000);
  __m512i const2 = _mm512_set_epi32(
      0x003f003d,
      0x003b0039,
      0x002f002d,
      0x002b0029,
      0x001f001d,
      0x001b0019,
      0x000f000d,
      0x000b0009,
      0x003e003c,
      0x003a0038,
      0x002e002c,
      0x002a0028,
      0x001e001c,
      0x001a0018,
      0x000e000c,
      0x000a0008);
#pragma unroll(4)
  for (int i = 0; i < 4; i++) {
    u[i] = _mm512_permutex2var_epi16(scratch[i], const1, scratch[i + 4]);
    u[i + 4] = _mm512_permutex2var_epi16(scratch[i], const2, scratch[i + 4]);
  }
}
|
| 1070 |
+
|
| 1071 |
+
// TODO(Leslie): Add the AVX2 Version of transpose_mxn for BFloat16 and Float16
|
| 1072 |
+
// Code referred to FBGEMM:
|
| 1073 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607
|
| 1074 |
+
template<>
|
| 1075 |
+
inline void transpose_mxn<BFloat16, 16, 16>(
|
| 1076 |
+
const BFloat16* src,
|
| 1077 |
+
int64_t ld_src,
|
| 1078 |
+
BFloat16* dst,
|
| 1079 |
+
int64_t ld_dst) {
|
| 1080 |
+
__m256i t[16];
|
| 1081 |
+
// load from src to registers
|
| 1082 |
+
// a: a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15
|
| 1083 |
+
// b: b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15
|
| 1084 |
+
// c: c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15
|
| 1085 |
+
// d: d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
|
| 1086 |
+
// e: e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 e14 e15
|
| 1087 |
+
// f: f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15
|
| 1088 |
+
// g: g0 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15
|
| 1089 |
+
// h: h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15
|
| 1090 |
+
// i: i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15
|
| 1091 |
+
// j: j0 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 j14 j15
|
| 1092 |
+
// k: k0 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15
|
| 1093 |
+
// l: l0 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15
|
| 1094 |
+
// m: m0 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 m13 m14 m15
|
| 1095 |
+
// n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15
|
| 1096 |
+
// o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15
|
| 1097 |
+
// p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15
|
| 1098 |
+
#pragma unroll(16)
|
| 1099 |
+
for (int i = 0; i < 16; i++) {
|
| 1100 |
+
t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
|
| 1101 |
+
}
|
| 1102 |
+
|
| 1103 |
+
__m512i u[8];
|
| 1104 |
+
_transpose_mxn_half_16_16(t, u);
|
| 1105 |
+
|
| 1106 |
+
#pragma unroll(8)
|
| 1107 |
+
for (int i = 0; i < 8; i++) {
|
| 1108 |
+
_mm256_storeu_si256(
|
| 1109 |
+
reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
|
| 1110 |
+
_mm512_extracti32x8_epi32(u[i], 0x0));
|
| 1111 |
+
_mm256_storeu_si256(
|
| 1112 |
+
reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst),
|
| 1113 |
+
_mm512_extracti32x8_epi32(u[i], 0x01));
|
| 1114 |
+
}
|
| 1115 |
+
}
|
| 1116 |
+
|
| 1117 |
+
// Code referred to FBGEMM:
|
| 1118 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607
|
| 1119 |
+
template<>
|
| 1120 |
+
inline void transpose_mxn<Half, 16, 16>(
|
| 1121 |
+
const Half* src,
|
| 1122 |
+
int64_t ld_src,
|
| 1123 |
+
Half* dst,
|
| 1124 |
+
int64_t ld_dst) {
|
| 1125 |
+
__m256i t[16];
|
| 1126 |
+
// load from src to registers
|
| 1127 |
+
// Same matrix indices as above transpose_mxn<BFloat16, 16, 16>
|
| 1128 |
+
#pragma unroll(16)
|
| 1129 |
+
for (int i = 0; i < 16; i++) {
|
| 1130 |
+
t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
|
| 1131 |
+
}
|
| 1132 |
+
|
| 1133 |
+
__m512i u[8];
|
| 1134 |
+
_transpose_mxn_half_16_16(t, u);
|
| 1135 |
+
|
| 1136 |
+
#pragma unroll(8)
|
| 1137 |
+
for (int i = 0; i < 8; i++) {
|
| 1138 |
+
_mm256_storeu_si256(
|
| 1139 |
+
reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
|
| 1140 |
+
_mm512_extracti32x8_epi32(u[i], 0x0));
|
| 1141 |
+
_mm256_storeu_si256(
|
| 1142 |
+
reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst),
|
| 1143 |
+
_mm512_extracti32x8_epi32(u[i], 0x01));
|
| 1144 |
+
}
|
| 1145 |
+
}
|
| 1146 |
+
|
| 1147 |
+
static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
|
| 1148 |
+
// t[0]: 0 32 1 33 2 34 3 35 8 40 9 41 10 42 11 43 16 ... 59
|
| 1149 |
+
// t[1]: 4 36 5 37 6 38 7 39 12 44 13 45 14 46 15 47 20 ... 63
|
| 1150 |
+
// t[2]: 64 96 65 97 66 98 67 99 72 104 73 105 74 106 75 ... 123
|
| 1151 |
+
// t[3]: 68 100 69 101 70 102 71 103 76 108 77 109 78 110 79 111 84 ... 127
|
| 1152 |
+
// t[4]: 128 160 129 161 130 162 131 163 136 168 137 169 138 170 139 171 144 ... 187
|
| 1153 |
+
// t[5]: 132 164 133 165 134 166 135 167 140 172 141 173 142 174 143 175 148 ... 191
|
| 1154 |
+
// t[6]: 192 224 193 225 194 226 195 227 200 232 201 233 202 234 203 235 208 ... 251
|
| 1155 |
+
// t[7]: 196 228 197 229 198 230 199 231 204 236 205 237 206 238 207 239 212 ... 255
|
| 1156 |
+
// t[8]: 256 288 257 289 258 290 259 291 264 296 265 297 266 298 267 299 272 ... 315
|
| 1157 |
+
// t[9]: 260 292 261 293 262 294 263 295 268 300 269 301 270 302 271 303 276 ... 319
|
| 1158 |
+
// t[10]: 320 352 321 353 322 354 323 355 328 360 329 361 330 362 331 363 336 ... 379
|
| 1159 |
+
// t[11]: 324 356 325 357 326 358 327 359 332 364 333 365 334 366 335 367 340 ... 383
|
| 1160 |
+
// t[12]: 384 416 385 417 386 418 387 419 392 424 393 425 394 426 395 427 400 ... 443
|
| 1161 |
+
// t[13]: 388 420 389 421 390 422 391 423 396 428 397 429 398 430 399 431 404 ... 447
|
| 1162 |
+
// t[14]: 448 480 449 481 450 482 451 483 456 488 457 489 458 490 459 491 464 ... 507
|
| 1163 |
+
// t[15]: 452 484 453 485 454 486 455 487 460 492 461 493 462 494 463 495 468 ... 511
|
| 1164 |
+
// t[16]: 512 544 513 545 514 546 515 547 520 552 521 553 522 554 523 555 528 ... 571
|
| 1165 |
+
// ...
|
| 1166 |
+
// t[31]: 964 996 965 997 966 998 967 999 972 1004 973 1005 974 1006 975 1007 980 ... 1023
|
| 1167 |
+
#pragma unroll(16)
|
| 1168 |
+
for (int i = 0; i < 16; ++i) {
|
| 1169 |
+
d[i * 2] = _mm512_unpacklo_epi16(r[i * 2], r[i * 2 + 1]);
|
| 1170 |
+
d[i * 2 + 1] = _mm512_unpackhi_epi16(r[i * 2], r[i * 2 + 1]);
|
| 1171 |
+
}
|
| 1172 |
+
|
| 1173 |
+
// t[0]: 0 32 64 96 1 33 65 97 8 40 72 104 9 41 73 105 16 ... 121
|
| 1174 |
+
// t[1]: 2 34 66 98 3 35 67 99 10 42 74 106 11 43 75 107 18 ... 123
|
| 1175 |
+
// t[2]: 4 36 68 100 5 37 69 101 12 44 76 108 13 45 77 109 20 ... 125
|
| 1176 |
+
// t[3]: 6 38 70 102 7 39 71 103 14 46 78 110 15 47 79 111 22 ... 127
|
| 1177 |
+
// t[4]: 128 160 192 224 129 161 193 225 136 168 200 232 137 169 201 233 144 ... 249
|
| 1178 |
+
// t[5]: 130 162 194 226 131 163 195 227 138 170 202 234 139 171 203 235 146 ... 251
|
| 1179 |
+
// t[6]: 132 164 196 228 133 165 197 229 140 172 204 236 141 173 205 237 148 ... 253
|
| 1180 |
+
// t[7]: 134 166 198 230 135 167 199 231 142 174 206 238 143 175 207 239 150 ... 255
|
| 1181 |
+
// t[8]: 256 288 320 352 257 289 321 353 264 296 328 360 265 297 329 361 272 ... 377
|
| 1182 |
+
// t[9]: 258 290 322 354 259 291 323 355 266 298 330 362 267 299 331 363 274 ... 379
|
| 1183 |
+
// t[10]: 260 292 324 356 261 293 325 357 268 300 332 364 269 301 333 365 276 ... 381
|
| 1184 |
+
// t[11]: 262 294 326 358 263 295 327 359 270 302 334 366 271 303 335 367 278 ... 383
|
| 1185 |
+
// t[12]: 384 416 448 480 385 417 449 481 392 424 456 488 393 425 457 489 400 ... 505
|
| 1186 |
+
// t[13]: 386 418 450 482 387 419 451 483 394 426 458 490 395 427 459 491 402 ... 507
|
| 1187 |
+
// t[14]: 388 420 452 484 389 421 453 485 396 428 460 492 397 429 461 493 404 ... 509
|
| 1188 |
+
// t[15]: 390 422 454 486 391 423 455 487 398 430 462 494 399 431 463 495 406 ... 511
|
| 1189 |
+
// t[16]: 512 544 576 608 513 545 577 609 520 552 584 616 521 553 585 617 528 ... 633
|
| 1190 |
+
// ...
|
| 1191 |
+
// t[31]: 902 934 966 998 903 935 967 999 910 942 974 1006 911 943 975 1007 918 ... 1023
|
| 1192 |
+
#pragma unroll(8)
|
| 1193 |
+
for (int i = 0; i < 8; ++i) {
|
| 1194 |
+
r[i * 4] = _mm512_unpacklo_epi32(d[i * 4], d[i * 4 + 2]);
|
| 1195 |
+
r[i * 4 + 1] = _mm512_unpackhi_epi32(d[i * 4], d[i * 4 + 2]);
|
| 1196 |
+
r[i * 4 + 2] = _mm512_unpacklo_epi32(d[i * 4 + 1], d[i * 4 + 3]);
|
| 1197 |
+
r[i * 4 + 3] = _mm512_unpackhi_epi32(d[i * 4 + 1], d[i * 4 + 3]);
|
| 1198 |
+
}
|
| 1199 |
+
|
| 1200 |
+
// t[0]: 0 32 64 96 128 160 192 224 8 40 72 104 136 168 200 232 16 ... 248
|
| 1201 |
+
// t[1]: 1 33 65 97 129 161 193 225 9 41 73 105 137 169 201 233 17 ... 249
|
| 1202 |
+
// t[2]: 2 34 66 98 130 162 194 226 10 42 74 106 138 170 202 234 18 ... 250
|
| 1203 |
+
// t[3]: 3 35 67 99 131 163 195 227 11 43 75 107 139 171 203 235 19 ... 251
|
| 1204 |
+
// t[4]: 4 36 68 100 132 164 196 228 12 44 76 108 140 172 204 236 20 ... 252
|
| 1205 |
+
// t[5]: 5 37 69 101 133 165 197 229 13 45 77 109 141 173 205 237 21 ... 253
|
| 1206 |
+
// t[6]: 6 38 70 102 134 166 198 230 14 46 78 110 142 174 206 238 22 ... 254
|
| 1207 |
+
// t[7]: 7 39 71 103 135 167 199 231 15 47 79 111 143 175 207 239 23 ... 255
|
| 1208 |
+
// t[8]: 256 288 320 352 384 416 448 480 264 296 328 360 392 424 456 488 272 ... 504
|
| 1209 |
+
// t[9]: 257 289 321 353 385 417 449 481 265 297 329 361 393 425 457 489 273 ... 505
|
| 1210 |
+
// t[10]: 258 290 322 354 386 418 450 482 266 298 330 362 394 426 458 490 274 ... 506
|
| 1211 |
+
// t[11]: 259 291 323 355 387 419 451 483 267 299 331 363 395 427 459 491 275 ... 507
|
| 1212 |
+
// t[12]: 260 292 324 356 388 420 452 484 268 300 332 364 396 428 460 492 276 ... 508
|
| 1213 |
+
// t[13]: 261 293 325 357 389 421 453 485 269 301 333 365 397 429 461 493 277 ... 509
|
| 1214 |
+
// t[14]: 262 294 326 358 390 422 454 486 270 302 334 366 398 430 462 494 278 ... 510
|
| 1215 |
+
// t[15]: 263 295 327 359 391 423 455 487 271 303 335 367 399 431 463 495 279 ... 511
|
| 1216 |
+
// t[16]: 512 544 576 608 640 672 704 736 520 552 584 616 648 680 712 744 528 ... 760
|
| 1217 |
+
// ...
|
| 1218 |
+
// t[31]: 775 807 839 871 903 935 967 999 783 815 847 879 911 943 975 1007 791 ... 1023
|
| 1219 |
+
#pragma unroll(4)
|
| 1220 |
+
for (int i = 0; i < 4; ++i) {
|
| 1221 |
+
d[i * 8] = _mm512_unpacklo_epi64(r[i * 8], r[i * 8 + 4]);
|
| 1222 |
+
d[i * 8 + 1] = _mm512_unpackhi_epi64(r[i * 8], r[i * 8 + 4]);
|
| 1223 |
+
d[i * 8 + 2] = _mm512_unpacklo_epi64(r[i * 8 + 1], r[i * 8 + 5]);
|
| 1224 |
+
d[i * 8 + 3] = _mm512_unpackhi_epi64(r[i * 8 + 1], r[i * 8 + 5]);
|
| 1225 |
+
d[i * 8 + 4] = _mm512_unpacklo_epi64(r[i * 8 + 2], r[i * 8 + 6]);
|
| 1226 |
+
d[i * 8 + 5] = _mm512_unpackhi_epi64(r[i * 8 + 2], r[i * 8 + 6]);
|
| 1227 |
+
d[i * 8 + 6] = _mm512_unpacklo_epi64(r[i * 8 + 3], r[i * 8 + 7]);
|
| 1228 |
+
d[i * 8 + 7] = _mm512_unpackhi_epi64(r[i * 8 + 3], r[i * 8 + 7]);
|
| 1229 |
+
}
|
| 1230 |
+
|
| 1231 |
+
// t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 16 ... 496
|
| 1232 |
+
// t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 17 ... 497
|
| 1233 |
+
// t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 18 ... 498
|
| 1234 |
+
// t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 19 ... 499
|
| 1235 |
+
// t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 20 ... 500
|
| 1236 |
+
// t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 21 ... 501
|
| 1237 |
+
// t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 22 ... 502
|
| 1238 |
+
// t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 23 ... 503
|
| 1239 |
+
// t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 24 ... 504
|
| 1240 |
+
// t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 25 ... 505
|
| 1241 |
+
// t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 26 ... 506
|
| 1242 |
+
// t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 27 ... 507
|
| 1243 |
+
// t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 28 ... 508
|
| 1244 |
+
// t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 29 ... 509
|
| 1245 |
+
// t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 30 ... 510
|
| 1246 |
+
// t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 31 ... 511
|
| 1247 |
+
// t[16]: 512 544 576 608 640 672 704 736 768 800 832 864 896 928 960 992 528 ... 1008
|
| 1248 |
+
// ...
|
| 1249 |
+
// t[31]: 527 559 591 623 655 687 719 751 783 815 847 879 911 943 975 1007 543 ... 1023
|
| 1250 |
+
__m512i const1 = _mm512_set_epi64(
|
| 1251 |
+
0x000000000000000d,
|
| 1252 |
+
0x000000000000000c,
|
| 1253 |
+
0x0000000000000005,
|
| 1254 |
+
0x0000000000000004,
|
| 1255 |
+
0x0000000000000009,
|
| 1256 |
+
0x0000000000000008,
|
| 1257 |
+
0x0000000000000001,
|
| 1258 |
+
0x0000000000000000);
|
| 1259 |
+
__m512i const2 = _mm512_set_epi64(
|
| 1260 |
+
0x000000000000000f,
|
| 1261 |
+
0x000000000000000e,
|
| 1262 |
+
0x0000000000000007,
|
| 1263 |
+
0x0000000000000006,
|
| 1264 |
+
0x000000000000000b,
|
| 1265 |
+
0x000000000000000a,
|
| 1266 |
+
0x0000000000000003,
|
| 1267 |
+
0x0000000000000002);
|
| 1268 |
+
#pragma unroll(8)
|
| 1269 |
+
for (int i = 0; i < 8; ++i) {
|
| 1270 |
+
r[i] = _mm512_permutex2var_epi64(d[i], /*idx*/const1, d[i + 8]);
|
| 1271 |
+
r[i + 8] = _mm512_permutex2var_epi64(d[i], /*idx*/const2, d[i + 8]);
|
| 1272 |
+
r[i + 16] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const1, d[i + 24]);
|
| 1273 |
+
r[i + 24] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const2, d[i + 24]);
|
| 1274 |
+
}
|
| 1275 |
+
|
| 1276 |
+
// t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 512 544 ... 992
|
| 1277 |
+
// t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 513 545 ... 993
|
| 1278 |
+
// t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 514 546 ... 994
|
| 1279 |
+
// t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 515 547 ... 995
|
| 1280 |
+
// t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 516 548 ... 996
|
| 1281 |
+
// t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 517 549 ... 997
|
| 1282 |
+
// t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 518 550 ... 998
|
| 1283 |
+
// t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 519 551 ... 999
|
| 1284 |
+
// t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 520 552 ... 1000
|
| 1285 |
+
// t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 521 553 ... 1001
|
| 1286 |
+
// t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 522 554 ... 1002
|
| 1287 |
+
// t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 523 555 ... 1003
|
| 1288 |
+
// t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 524 556 ... 1004
|
| 1289 |
+
// t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 525 557 ... 1005
|
| 1290 |
+
// t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 526 558 ... 1006
|
| 1291 |
+
// t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 527 559 ... 1007
|
| 1292 |
+
// t[16]: 16 48 80 112 144 176 208 240 272 304 336 368 400 432 464 496 528 560 ... 1008
|
| 1293 |
+
// ...
|
| 1294 |
+
// t[31]: 31 63 95 127 159 191 223 255 287 319 351 383 415 447 479 511 543 575 ... 1023
|
| 1295 |
+
__m512i const3 = _mm512_set_epi64(
|
| 1296 |
+
0x000000000000000b,
|
| 1297 |
+
0x000000000000000a,
|
| 1298 |
+
0x0000000000000009,
|
| 1299 |
+
0x0000000000000008,
|
| 1300 |
+
0x0000000000000003,
|
| 1301 |
+
0x0000000000000002,
|
| 1302 |
+
0x0000000000000001,
|
| 1303 |
+
0x0000000000000000);
|
| 1304 |
+
__m512i const4 = _mm512_set_epi64(
|
| 1305 |
+
0x000000000000000f,
|
| 1306 |
+
0x000000000000000e,
|
| 1307 |
+
0x000000000000000d,
|
| 1308 |
+
0x000000000000000c,
|
| 1309 |
+
0x0000000000000007,
|
| 1310 |
+
0x0000000000000006,
|
| 1311 |
+
0x0000000000000005,
|
| 1312 |
+
0x0000000000000004);
|
| 1313 |
+
#pragma unroll(16)
|
| 1314 |
+
for (int i = 0; i < 16; ++i) {
|
| 1315 |
+
d[i] = _mm512_permutex2var_epi64(r[i], /*idx*/const3, r[i + 16]);
|
| 1316 |
+
d[i + 16] = _mm512_permutex2var_epi64(r[i], /*idx*/const4, r[i + 16]);
|
| 1317 |
+
}
|
| 1318 |
+
}
|
| 1319 |
+
|
| 1320 |
+
// Code referred to FBGEMM:
|
| 1321 |
+
// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#LL19C6-L19C6
|
| 1322 |
+
template<>
|
| 1323 |
+
inline void transpose_mxn<BFloat16, 32, 32>(
|
| 1324 |
+
const BFloat16* src,
|
| 1325 |
+
int64_t ld_src,
|
| 1326 |
+
BFloat16* dst,
|
| 1327 |
+
int64_t ld_dst) {
|
| 1328 |
+
// Load from memory
|
| 1329 |
+
__m512i r[32];
|
| 1330 |
+
#pragma unroll(32)
|
| 1331 |
+
for (int i = 0; i < 32; ++i) {
|
| 1332 |
+
r[i] = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i* ld_src));
|
| 1333 |
+
}
|
| 1334 |
+
|
| 1335 |
+
__m512i d[32];
|
| 1336 |
+
_transpose_mxn_half_32_32(r, d);
|
| 1337 |
+
|
| 1338 |
+
// Store to dst
|
| 1339 |
+
#pragma unroll(32)
|
| 1340 |
+
for (int i = 0; i < 32; ++i) {
|
| 1341 |
+
_mm512_storeu_si512(dst + i* ld_dst, d[i]);
|
| 1342 |
+
}
|
| 1343 |
+
}
|
| 1344 |
+
|
| 1345 |
+
template<>
|
| 1346 |
+
inline void transpose_mxn<Half, 32, 32>(
|
| 1347 |
+
const Half* src,
|
| 1348 |
+
int64_t ld_src,
|
| 1349 |
+
Half* dst,
|
| 1350 |
+
int64_t ld_dst) {
|
| 1351 |
+
// Load from memory
|
| 1352 |
+
__m512i r[32];
|
| 1353 |
+
#pragma unroll(32)
|
| 1354 |
+
for (int i = 0; i < 32; ++i) {
|
| 1355 |
+
r[i] = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i* ld_src));
|
| 1356 |
+
}
|
| 1357 |
+
|
| 1358 |
+
__m512i d[32];
|
| 1359 |
+
_transpose_mxn_half_32_32(r, d);
|
| 1360 |
+
|
| 1361 |
+
// Store to dst
|
| 1362 |
+
#pragma unroll(32)
|
| 1363 |
+
for (int i = 0; i < 32; ++i) {
|
| 1364 |
+
_mm512_storeu_si512(dst + i* ld_dst, d[i]);
|
| 1365 |
+
}
|
| 1366 |
+
}
|
| 1367 |
+
|
| 1368 |
+
// AVX512 vector of 32 fp16 values. Arithmetic is carried out in fp32 via the
// shared Vectorized16 machinery (see cvtfp16_fp32 / cvtfp32_fp16 above).
template <>
class Vectorized<Half>: public Vectorized16<Half> {
 public:
  // Inherit all constructors and the common 16-bit-element functionality.
  using Vectorized16::Vectorized16;

  // Fractional part: x - trunc(x). Defined out-of-line below because it
  // needs operator- on Vectorized<Half>.
  Vectorized<Half> frac() const;

  // Comparison helpers returning numeric 1.0 / 0.0 per lane (unlike the
  // operators, which return all-ones bit masks); defined out-of-line below.
  Vectorized<Half> eq(const Vectorized<Half>& other) const;
  Vectorized<Half> ne(const Vectorized<Half>& other) const;
  Vectorized<Half> gt(const Vectorized<Half>& other) const;
  Vectorized<Half> ge(const Vectorized<Half>& other) const;
  Vectorized<Half> lt(const Vectorized<Half>& other) const;
  Vectorized<Half> le(const Vectorized<Half>& other) const;
};
|
| 1382 |
+
|
| 1383 |
+
// Arithmetic for fp16 vectors is performed in fp32: binary_op_as_fp32 widens
// both operands, applies the lambda to each fp32 half, and narrows back.
Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& lhs, const __m512& rhs) {
    return _mm512_add_ps(lhs, rhs);
  });
}
Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& lhs, const __m512& rhs) {
    return _mm512_sub_ps(lhs, rhs);
  });
}
Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& lhs, const __m512& rhs) {
    return _mm512_mul_ps(lhs, rhs);
  });
}
Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& lhs, const __m512& rhs) {
    return _mm512_div_ps(lhs, rhs);
  });
}
|
| 1395 |
+
|
| 1396 |
+
// Bitwise ops act directly on the raw fp16 bit patterns (no fp32 round-trip).
Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  return _mm512_and_si512(a, b);
}
Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  return _mm512_or_si512(a, b);
}
Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
  return _mm512_xor_si512(a, b);
}
|
| 1405 |
+
|
| 1406 |
+
// Numeric comparison helpers: the underlying operators return an all-ones
// bit mask per matching lane; AND-ing with 1.0f converts that mask into a
// numeric 0.0 / 1.0 result.
inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
  return (*this == other) & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
  return (*this != other) & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
  return (*this > other) & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
  return (*this >= other) & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
  return (*this < other) & Vectorized<Half>(1.0f);
}

inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
  return (*this <= other) & Vectorized<Half>(1.0f);
}

// frac. Implement this here so we can use subtraction
inline Vectorized<Half> Vectorized<Half>::frac() const {
  return *this - this->trunc();
}
|
| 1434 |
+
|
| 1435 |
+
// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
|
| 1436 |
+
// either input is a NaN.
|
| 1437 |
+
template <>
|
| 1438 |
+
Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 1439 |
+
__m512 a_lo, a_hi;
|
| 1440 |
+
__m512 b_lo, b_hi;
|
| 1441 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1442 |
+
cvtfp16_fp32(__m512i(b), b_lo, b_hi);
|
| 1443 |
+
auto max_lo = _mm512_max_ps(a_lo, b_lo);
|
| 1444 |
+
auto max_hi = _mm512_max_ps(a_hi, b_hi);
|
| 1445 |
+
auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
|
| 1446 |
+
auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
|
| 1447 |
+
auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
|
| 1448 |
+
auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
|
| 1449 |
+
// Exploit the fact that all-ones is a NaN.
|
| 1450 |
+
auto o1 = _mm512_or_ps(max_lo, nan_lo);
|
| 1451 |
+
auto o2 = _mm512_or_ps(max_hi, nan_hi);
|
| 1452 |
+
return cvtfp32_fp16(o1, o2);
|
| 1453 |
+
}
|
| 1454 |
+
|
| 1455 |
+
// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
|
| 1456 |
+
// either input is a NaN.
|
| 1457 |
+
template <>
|
| 1458 |
+
Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
|
| 1459 |
+
__m512 a_lo, a_hi;
|
| 1460 |
+
__m512 b_lo, b_hi;
|
| 1461 |
+
__m512i zero_vec = _mm512_set1_epi32(0);
|
| 1462 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1463 |
+
cvtfp16_fp32(__m512i(b), b_lo, b_hi);
|
| 1464 |
+
auto min_lo = _mm512_min_ps(a_lo, b_lo);
|
| 1465 |
+
auto min_hi = _mm512_min_ps(a_hi, b_hi);
|
| 1466 |
+
auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
|
| 1467 |
+
auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
|
| 1468 |
+
auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
|
| 1469 |
+
0xFFFFFFFF));
|
| 1470 |
+
auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
|
| 1471 |
+
0xFFFFFFFF));
|
| 1472 |
+
// Exploit the fact that all-ones is a NaN.
|
| 1473 |
+
auto o1 = _mm512_or_ps(min_lo, nan_lo);
|
| 1474 |
+
auto o2 = _mm512_or_ps(min_hi, nan_hi);
|
| 1475 |
+
return cvtfp32_fp16(o1, o2);
|
| 1476 |
+
}
|
| 1477 |
+
|
| 1478 |
+
template <>
|
| 1479 |
+
Vectorized<Half> inline clamp(const Vectorized<Half>& a,
|
| 1480 |
+
const Vectorized<Half>& min, const Vectorized<Half>& max) {
|
| 1481 |
+
__m512 a_lo, a_hi;
|
| 1482 |
+
__m512 min_lo, min_hi;
|
| 1483 |
+
__m512 max_lo, max_hi;
|
| 1484 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1485 |
+
cvtfp16_fp32(__m512i(min), min_lo, min_hi);
|
| 1486 |
+
cvtfp16_fp32(__m512i(max), max_lo, max_hi);
|
| 1487 |
+
auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
|
| 1488 |
+
auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
|
| 1489 |
+
return cvtfp32_fp16(o1, o2);
|
| 1490 |
+
}
|
| 1491 |
+
|
| 1492 |
+
template <>
|
| 1493 |
+
Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
|
| 1494 |
+
__m512 a_lo, a_hi;
|
| 1495 |
+
__m512 max_lo, max_hi;
|
| 1496 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1497 |
+
cvtfp16_fp32(__m512i(max), max_lo, max_hi);
|
| 1498 |
+
auto o1 = _mm512_min_ps(max_lo, a_lo);
|
| 1499 |
+
auto o2 = _mm512_min_ps(max_hi, a_hi);
|
| 1500 |
+
return cvtfp32_fp16(o1, o2);
|
| 1501 |
+
}
|
| 1502 |
+
|
| 1503 |
+
template <>
|
| 1504 |
+
Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
|
| 1505 |
+
__m512 a_lo, a_hi;
|
| 1506 |
+
__m512 min_lo, min_hi;
|
| 1507 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1508 |
+
cvtfp16_fp32(__m512i(min), min_lo, min_hi);
|
| 1509 |
+
auto o1 = _mm512_max_ps(min_lo, a_lo);
|
| 1510 |
+
auto o2 = _mm512_max_ps(min_hi, a_hi);
|
| 1511 |
+
return cvtfp32_fp16(o1, o2);
|
| 1512 |
+
}
|
| 1513 |
+
|
| 1514 |
+
template <>
|
| 1515 |
+
inline void convert(const Half* src, Half* dst, int64_t n) {
|
| 1516 |
+
int64_t i;
|
| 1517 |
+
#pragma unroll
|
| 1518 |
+
for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
|
| 1519 |
+
auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
|
| 1520 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
|
| 1521 |
+
}
|
| 1522 |
+
#pragma unroll
|
| 1523 |
+
for (; i < n; i++) {
|
| 1524 |
+
dst[i] = src[i];
|
| 1525 |
+
}
|
| 1526 |
+
}
|
| 1527 |
+
|
| 1528 |
+
template <>
|
| 1529 |
+
inline void convert(const float* src, Half* dst, int64_t n) {
|
| 1530 |
+
int64_t i;
|
| 1531 |
+
for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
|
| 1532 |
+
__m512 a = _mm512_loadu_ps(&src[i]);
|
| 1533 |
+
__m512 b = _mm512_loadu_ps(&src[i + 16]);
|
| 1534 |
+
|
| 1535 |
+
__m512i bf = cvtfp32_fp16(a, b);
|
| 1536 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
|
| 1537 |
+
}
|
| 1538 |
+
for (; i < n; i++) {
|
| 1539 |
+
dst[i] = c10::convert<Half>(src[i]);
|
| 1540 |
+
}
|
| 1541 |
+
}
|
| 1542 |
+
|
| 1543 |
+
template <>
|
| 1544 |
+
inline void convert(const double* src, Half* dst, int64_t n) {
|
| 1545 |
+
auto load_float = [](const double *src) -> __m512 {
|
| 1546 |
+
// Load one float vector from an array of doubles
|
| 1547 |
+
__m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
|
| 1548 |
+
__m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
|
| 1549 |
+
return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
|
| 1550 |
+
};
|
| 1551 |
+
|
| 1552 |
+
int64_t i;
|
| 1553 |
+
for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
|
| 1554 |
+
__m512 a = load_float(&src[i]);
|
| 1555 |
+
__m512 b = load_float(&src[i + 16]);
|
| 1556 |
+
|
| 1557 |
+
__m512i bf = cvtfp32_fp16(a, b);
|
| 1558 |
+
_mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
|
| 1559 |
+
}
|
| 1560 |
+
for (; i < n; i++) {
|
| 1561 |
+
dst[i] = c10::convert<Half>(src[i]);
|
| 1562 |
+
}
|
| 1563 |
+
}
|
| 1564 |
+
|
| 1565 |
+
template <>
|
| 1566 |
+
Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
|
| 1567 |
+
const Vectorized<Half>& b, const Vectorized<Half>& c) {
|
| 1568 |
+
__m512 a_lo, a_hi;
|
| 1569 |
+
__m512 b_lo, b_hi;
|
| 1570 |
+
__m512 c_lo, c_hi;
|
| 1571 |
+
cvtfp16_fp32(__m512i(a), a_lo, a_hi);
|
| 1572 |
+
cvtfp16_fp32(__m512i(b), b_lo, b_hi);
|
| 1573 |
+
cvtfp16_fp32(__m512i(c), c_lo, c_hi);
|
| 1574 |
+
auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
|
| 1575 |
+
auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
|
| 1576 |
+
return cvtfp32_fp16(o1, o2);
|
| 1577 |
+
}
|
| 1578 |
+
|
| 1579 |
+
// Generates convert_<name>_float / convert_float_<name> pairs that widen one
// reduced-precision vector into two fp32 vectors (and narrow back), using the
// vectorized cvt helpers defined above.
#define CONVERT_VECTORIZED_INIT(type, name) \
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
  __m512 o1, o2; \
  cvt_to_fp32<type>(__m512i(a), o1, o2); \
  return std::make_tuple(o1, o2); \
} \
\
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
  return cvt_from_fp32<type>(__m512(a), __m512(b)); \
}
CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
CONVERT_VECTORIZED_INIT(Half, half);
|
| 1591 |
+
|
| 1592 |
+
#else //defined(CPU_CAPABILITY_AVX512)
|
| 1593 |
+
|
| 1594 |
+
// Scalar fallback for convert_<name>_float / convert_float_<name>: round-trip
// through aligned scratch arrays, converting one element at a time.
#define CONVERT_NON_VECTORIZED_INIT(type, name) \
inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
  constexpr int64_t K = Vectorized<type>::size(); \
  __at_align__ float arr[K]; \
  __at_align__ type arr2[K]; \
  a.store(arr2); \
  for (const auto k : c10::irange(K)) { \
    arr[k] = c10::convert<float>(arr2[k]); \
  } \
  return std::make_tuple( \
      Vectorized<float>::loadu(arr), \
      Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
} \
\
inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
  constexpr int64_t K = Vectorized<type>::size(); \
  __at_align__ float arr[K]; \
  __at_align__ type arr2[K]; \
  a.store(arr); \
  b.store(arr + Vectorized<float>::size()); \
  for (const auto k : c10::irange(K)) { \
    arr2[k] = c10::convert<type>(arr[k]); \
  } \
  return Vectorized<type>::loadu(arr2); \
}
CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
CONVERT_NON_VECTORIZED_INIT(Half, half);
|
| 1621 |
+
|
| 1622 |
+
#endif // defined(CPU_CAPABILITY_AVX512)
|
| 1623 |
+
|
| 1624 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 1625 |
+
// Loads fp32 vectors from a reduced-precision array via vectorized
// conversion. The one-output overload reads 16 source elements (half a
// Vectorized<type>); the two-output overload reads a full Vectorized<type>.
#define LOAD_FP32_VECTORIZED_INIT(type, name) \
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
  auto values = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(data)); \
  __m512 out_values; \
  cvt_to_fp32<type>(values, out_values); \
  out = out_values; \
} \
\
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
  auto vec = Vectorized<type>::loadu(data); \
  __m512 out1_values, out2_values; \
  cvt_to_fp32<type>(vec, out1_values, out2_values); \
  out1 = out1_values; \
  out2 = out2_values; \
}
LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
LOAD_FP32_VECTORIZED_INIT(Half, fp16);
|
| 1642 |
+
|
| 1643 |
+
#else // defined(CPU_CAPABILITY_AVX512)
|
| 1644 |
+
// Scalar fallback: widen element-by-element into a scratch array, then load
// it as fp32 vectors.
#define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
  __at_align__ float values[Vectorized<float>::size()]; \
  for (const auto k : c10::irange(Vectorized<float>::size())) { \
    values[k] = data[k]; \
  } \
  out = Vectorized<float>::loadu(values); \
} \
\
inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
  load_fp32_from_##name(data, out1); \
  data += Vectorized<float>::size(); \
  load_fp32_from_##name(data, out2); \
}
LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
|
| 1660 |
+
|
| 1661 |
+
#endif
|
| 1662 |
+
}}}
|
parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h
ADDED
|
@@ -0,0 +1,513 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// DO NOT DEFINE STATIC DATA IN THIS HEADER!
|
| 4 |
+
// See Note [Do not compile initializers with AVX]
|
| 5 |
+
|
| 6 |
+
#include <c10/util/complex.h>
|
| 7 |
+
#include <c10/util/irange.h>
|
| 8 |
+
#include <ATen/cpu/vec/intrinsics.h>
|
| 9 |
+
#include <ATen/cpu/vec/vec_base.h>
|
| 10 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 11 |
+
#define SLEEF_STATIC_LIBS
|
| 12 |
+
#include <sleef.h>
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
namespace at {
|
| 16 |
+
namespace vec {
|
| 17 |
+
// See Note [CPU_CAPABILITY namespace]
|
| 18 |
+
inline namespace CPU_CAPABILITY {
|
| 19 |
+
|
| 20 |
+
#if defined(CPU_CAPABILITY_AVX512)
|
| 21 |
+
|
| 22 |
+
template <> class Vectorized<c10::complex<double>> {
|
| 23 |
+
private:
|
| 24 |
+
__m512d values;
|
| 25 |
+
static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
|
| 26 |
+
public:
|
| 27 |
+
using value_type = c10::complex<double>;
|
| 28 |
+
using size_type = int;
|
| 29 |
+
static constexpr size_type size() {
|
| 30 |
+
return 4;
|
| 31 |
+
}
|
| 32 |
+
Vectorized() {}
|
| 33 |
+
Vectorized(__m512d v) : values(v) {}
|
| 34 |
+
Vectorized(c10::complex<double> val) {
|
| 35 |
+
double real_value = val.real();
|
| 36 |
+
double imag_value = val.imag();
|
| 37 |
+
values = _mm512_setr_pd(real_value, imag_value, real_value, imag_value,
|
| 38 |
+
real_value, imag_value, real_value, imag_value);
|
| 39 |
+
}
|
| 40 |
+
Vectorized(c10::complex<double> val1, c10::complex<double> val2,
|
| 41 |
+
c10::complex<double> val3, c10::complex<double> val4) {
|
| 42 |
+
values = _mm512_setr_pd(val1.real(), val1.imag(),
|
| 43 |
+
val2.real(), val2.imag(),
|
| 44 |
+
val3.real(), val3.imag(),
|
| 45 |
+
val4.real(), val4.imag());
|
| 46 |
+
}
|
| 47 |
+
operator __m512d() const {
|
| 48 |
+
return values;
|
| 49 |
+
}
|
| 50 |
+
  // Selects elements from `b` where the corresponding bit of `mask` is set,
  // from `a` otherwise. `mask` has one bit per complex element (4 total).
  template <int64_t mask>
  static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a,
                                                const Vectorized<c10::complex<double>>& b) {
    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
    // Each complex element occupies two consecutive doubles, so every bit of
    // the 4-bit element mask expands into two bits of the 8-lane blend mask.
    // NOLINTNEXTLINE(clang-diagnostic-warning)
    switch (mask) {
      case 0:
        return a;
      case 1:
        return _mm512_mask_blend_pd(0x03, a.values, b.values); //b0000 0001 = b0000 0011
      case 2:
        return _mm512_mask_blend_pd(0x0C, a.values, b.values); //b0000 0010 = b0000 1100
      case 3:
        return _mm512_mask_blend_pd(0x0F, a.values, b.values); //b0000 0011 = b0000 1111
      case 4:
        return _mm512_mask_blend_pd(0x30, a.values, b.values); //b0000 0100 = b0011 0000
      case 5:
        return _mm512_mask_blend_pd(0x33, a.values, b.values); //b0000 0101 = b0011 0011
      case 6:
        return _mm512_mask_blend_pd(0x3C, a.values, b.values); //b0000 0110 = b0011 1100
      case 7:
        return _mm512_mask_blend_pd(0x3F, a.values, b.values); //b0000 0111 = b0011 1111
      case 8:
        return _mm512_mask_blend_pd(0xC0, a.values, b.values); //b0000 1000 = b1100 0000
      case 9:
        return _mm512_mask_blend_pd(0xC3, a.values, b.values); //b0000 1001 = b1100 0011
      case 10:
        return _mm512_mask_blend_pd(0xCC, a.values, b.values); //b0000 1010 = b1100 1100
      case 11:
        return _mm512_mask_blend_pd(0xCF, a.values, b.values); //b0000 1011 = b1100 1111
      case 12:
        return _mm512_mask_blend_pd(0xF0, a.values, b.values); //b0000 1100 = b1111 0000
      case 13:
        return _mm512_mask_blend_pd(0xF3, a.values, b.values); //b0000 1101 = b1111 0011
      case 14:
        return _mm512_mask_blend_pd(0xFC, a.values, b.values); //b0000 1110 = b1111 1100
      case 15:
        return _mm512_mask_blend_pd(0xFF, a.values, b.values); //b0000 1111 = b1111 1111
    }
    return b;
  }
|
| 91 |
+
  // Runtime blend: a lane pair takes its value from `b` when the
  // corresponding element of `mask` is all-ones, from `a` otherwise.
  static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a,
                                                 const Vectorized<c10::complex<double>>& b,
                                                 const Vectorized<c10::complex<double>>& mask) {
    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
    auto mask_ = _mm512_unpacklo_pd(mask.values, mask.values);
    // Turn the per-lane all-ones pattern into a k-register blend mask.
    auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
    auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask_), all_ones, _MM_CMPINT_EQ);
    return _mm512_mask_blend_pd(mmask, a.values, b.values);
  }
|
| 100 |
+
  // Arithmetic sequence: {base, base + step, base + 2*step, base + 3*step}.
  template<typename step_t>
  static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0.,
                                                 step_t step = static_cast<step_t>(1)) {
    return Vectorized<c10::complex<double>>(base,
                                            base + c10::complex<double>(1)*step,
                                            base + c10::complex<double>(2)*step,
                                            base + c10::complex<double>(3)*step);
  }
|
| 108 |
+
  // Returns a vector whose first `count` elements come from `b` and the
  // remaining elements from `a`.
  static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a,
                                              const Vectorized<c10::complex<double>>& b,
                                              int64_t count = size()) {
    switch (count) {
      case 0:
        return a;
      case 1:
        return blend<1>(a, b);
      case 2:
        return blend<3>(a, b);
      case 3:
        return blend<7>(a, b);
    }
    return b;
  }
|
| 123 |
+
  // Loads `count` complex elements (2*count doubles) from `ptr`; lanes beyond
  // `count` are zero-filled.
  static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
    if (count == size())
      return _mm512_loadu_pd(reinterpret_cast<const double*>(ptr));

    __at_align__ double tmp_values[2*size()];
    // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
    // instructions while a loop would be compiled to one instruction.
    for (const auto i : c10::irange(2*size())) {
      tmp_values[i] = 0.0;
    }
    std::memcpy(
        tmp_values,
        reinterpret_cast<const double*>(ptr),
        count * sizeof(c10::complex<double>));
    // tmp_values is __at_align__, so the aligned load is safe here.
    return _mm512_load_pd(tmp_values);
  }
|
| 140 |
+
  // Stores the first `count` complex elements (2*count doubles) to `ptr`.
  void store(void* ptr, int count = size()) const {
    if (count == size()) {
      _mm512_storeu_pd(reinterpret_cast<double*>(ptr), values);
    } else if (count > 0) {
      // Partial store: spill the whole register, then copy only `count`
      // elements so bytes past the requested range are never touched.
      double tmp_values[2*size()];
      _mm512_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
      std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
    }
  }
|
| 149 |
+
const c10::complex<double>& operator[](int idx) const = delete;
|
| 150 |
+
c10::complex<double>& operator[](int idx) = delete;
|
| 151 |
+
  // Applies a scalar function element-wise via a round-trip through memory.
  Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
    __at_align__ c10::complex<double> tmp[size()];
    store(tmp);
    for (const auto i : c10::irange(size())) {
      tmp[i] = f(tmp[i]);
    }
    return loadu(tmp);
  }
|
| 159 |
+
  // AVX512 doesn't have horizontal add & horizontal sub instructions.
  // TODO: hadd_pd() & hsub_pd() may have scope for improvement.
  // Pairwise horizontal add: idx1 gathers the even lanes of a and b, idx2
  // the odd lanes; their sum yields one result per adjacent lane pair.
  static inline __m512d hadd_pd(__m512d a, __m512d b) {
    __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0);
    __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1);
    return _mm512_add_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
                         _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
  }
  // Pairwise horizontal subtract (even lane minus odd lane of each pair).
  static inline __m512d hsub_pd(__m512d a, __m512d b) {
    __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0);
    __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1);
    return _mm512_sub_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
                         _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
  }
|
| 173 |
+
  // Squared magnitude |z|^2, duplicated into both lanes of each pair.
  __m512d abs_2_() const {
    auto val_2 = _mm512_mul_pd(values, values);     // a*a     b*b
    return hadd_pd(val_2, val_2);                   // a*a+b*b a*a+b*b
  }
  // Magnitude |z| = hypot(real, imag), duplicated into both lanes of each
  // pair. hypot avoids overflow/underflow of the naive sqrt(a*a+b*b).
  __m512d abs_() const {
    auto real = _mm512_movedup_pd(values);        // real real
    // movehdup_pd does not exist...
    auto imag = _mm512_permute_pd(values, 0xff);  // imag imag
    return Sleef_hypotd8_u05(real, imag);         // abs  abs
  }
  // |z| in the real slot, 0 in the imaginary slot of each element.
  Vectorized<c10::complex<double>> abs() const {
    const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
                                                                    0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
    return _mm512_and_pd(abs_(), real_mask);        // abs 0
  }
|
| 190 |
+
__m512d angle_() const {
|
| 191 |
+
//angle = atan2(b/a)
|
| 192 |
+
auto b_a = _mm512_permute_pd(values, 0x55); // b a
|
| 193 |
+
return Sleef_atan2d8_u10(values, b_a); // 90-angle angle
|
| 194 |
+
}
|
| 195 |
+
Vectorized<c10::complex<double>> angle() const {
|
| 196 |
+
const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
| 197 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
| 198 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
| 199 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
|
| 200 |
+
auto angle = _mm512_permute_pd(angle_(), 0x55); // angle 90-angle
|
| 201 |
+
return _mm512_and_pd(angle, real_mask); // angle 0
|
| 202 |
+
}
|
| 203 |
+
// sgn(z) = z / |z|, with sgn(0) = 0. The division is computed for every
// lane (zero-magnitude lanes yield NaN/inf there) and the blend then
// forces those lanes back to zero, so the spurious values never escape.
Vectorized<c10::complex<double>> sgn() const {
  auto abs = abs_();
  auto zero = _mm512_setzero_pd();
  auto mask = _mm512_cmp_pd_mask(abs, zero, _CMP_EQ_OQ);
  auto div = _mm512_div_pd(values, abs);
  return _mm512_mask_blend_pd(mask, div, zero);
}
|
| 210 |
+
__m512d real_() const {
|
| 211 |
+
const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
| 212 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
| 213 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
|
| 214 |
+
0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
|
| 215 |
+
return _mm512_and_pd(values, real_mask);
|
| 216 |
+
}
|
| 217 |
+
Vectorized<c10::complex<double>> real() const {
|
| 218 |
+
return real_();
|
| 219 |
+
}
|
| 220 |
+
__m512d imag_() const {
|
| 221 |
+
const __m512d imag_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
|
| 222 |
+
0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
|
| 223 |
+
0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
|
| 224 |
+
0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
|
| 225 |
+
return _mm512_and_pd(values, imag_mask);
|
| 226 |
+
}
|
| 227 |
+
Vectorized<c10::complex<double>> imag() const {
|
| 228 |
+
return _mm512_permute_pd(imag_(), 0x55); //b a
|
| 229 |
+
}
|
| 230 |
+
__m512d conj_() const {
|
| 231 |
+
const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
| 232 |
+
return _mm512_xor_pd(values, sign_mask); // a -b
|
| 233 |
+
}
|
| 234 |
+
Vectorized<c10::complex<double>> conj() const {
|
| 235 |
+
return conj_();
|
| 236 |
+
}
|
| 237 |
+
// Complex natural log via the scalar fallback.
Vectorized<c10::complex<double>> log() const {
  // Most trigonometric ops are built on log() to improve complex-number
  // performance, so this is the workhorse fallback.
  return map(std::log);
}
// log2(z) = ln(z) / ln(2)  (change of base).
Vectorized<c10::complex<double>> log2() const {
  const __m512d log2_ = _mm512_set1_pd(std::log(2));
  return _mm512_div_pd(log(), log2_);
}
// log10(z) = ln(z) / ln(10)  (change of base).
Vectorized<c10::complex<double>> log10() const {
  const __m512d log10_ = _mm512_set1_pd(std::log(10));
  return _mm512_div_pd(log(), log10_);
}
Vectorized<c10::complex<double>> log1p() const {
  return map(std::log1p);
}
|
| 252 |
+
// asin(z) = -i*ln(iz + sqrt(1 - z^2)), evaluated with vector ops:
//   = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
//   = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
Vectorized<c10::complex<double>> asin() const {
  const __m512d one = _mm512_set1_pd(1);

  auto conj = conj_();
  auto b_a = _mm512_permute_pd(conj, 0x55);                           // -b  a  (this is iz)
  auto ab = _mm512_mul_pd(conj, b_a);                                 // -ab -ab
  auto im = _mm512_add_pd(ab, ab);                                    // -2ab -2ab (imag part of 1 - z^2)

  auto val_2 = _mm512_mul_pd(values, values);                         // a*a b*b
  auto re = hsub_pd(val_2, _mm512_permute_pd(val_2, 0x55));           // a*a-b*b  b*b-a*a
  re = _mm512_sub_pd(one, re);                                        // real part of 1 - z^2

  // Blend 0xAA: real slots from `re`, imag slots from `im`, then sqrt.
  auto root = Vectorized(_mm512_mask_blend_pd(0xAA, re, im)).sqrt();  // sqrt(re + i*im)
  auto ln = Vectorized(_mm512_add_pd(b_a, root)).log();               // ln(iz + sqrt())
  // Multiply by -i == swap re/im then conjugate.
  return Vectorized(_mm512_permute_pd(ln.values, 0x55)).conj();       // -i*ln()
}
|
| 272 |
+
// acos(z) = pi/2 - asin(z); the constant only occupies the real slots.
Vectorized<c10::complex<double>> acos() const {
  constexpr auto pi_2d = c10::pi<double> / 2;
  const __m512d pi_2 = _mm512_setr_pd(pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0);
  return _mm512_sub_pd(pi_2, asin());
}
|
| 278 |
+
// atan() is defined out-of-line (it needs operator/ which is declared
// after the class).
Vectorized<c10::complex<double>> atan() const;
Vectorized<c10::complex<double>> atanh() const {
  return map(std::atanh);
}
|
| 282 |
+
// exp(a + bi) = exp(a)*(cos(b) + sin(b)i)
Vectorized<c10::complex<double>> exp() const {
  auto exp = Sleef_expd8_u10(values);                                    // exp(a) exp(b)
  // Broadcast exp(a) into both slots of each pair.
  exp = _mm512_mask_blend_pd(0xAA, exp, _mm512_permute_pd(exp, 0x55));   // exp(a) exp(a)

  // Sleef sincos returns sin in .x and cos in .y for every lane.
  auto sin_cos = Sleef_sincosd8_u10(values);                             // [sin(a), cos(a)] [sin(b), cos(b)]
  auto cos_sin = _mm512_mask_blend_pd(0xAA, _mm512_permute_pd(sin_cos.y, 0x55),
                                      sin_cos.x);                        // cos(b) sin(b)
  return _mm512_mul_pd(exp, cos_sin);
}
|
| 293 |
+
// 2**z via the identity 2**z = exp(ln(2) * z).
Vectorized<c10::complex<double>> exp2() const {
  const __m512d ln_2 = _mm512_set1_pd(c10::ln_2<double>);
  Vectorized<c10::complex<double>> scaled_values = _mm512_mul_pd(values, ln_2);
  return scaled_values.exp();
}
|
| 299 |
+
// exp(z) - 1 via the scalar fallback (preserves precision near zero).
Vectorized<c10::complex<double>> expm1() const {
  return map(std::expm1);
}
|
| 302 |
+
// Complex trig / hyperbolic ops: no vectorized implementation here, so
// each one falls back to the element-wise scalar routine via map().
Vectorized<c10::complex<double>> sin() const {
  return map(std::sin);
}
Vectorized<c10::complex<double>> sinh() const {
  return map(std::sinh);
}
Vectorized<c10::complex<double>> cos() const {
  return map(std::cos);
}
Vectorized<c10::complex<double>> cosh() const {
  return map(std::cosh);
}
|
| 314 |
+
// Rounding ops apply independently to real and imag slots, which is the
// desired element-wise complex behavior.
Vectorized<c10::complex<double>> ceil() const {
  return _mm512_ceil_pd(values);
}
Vectorized<c10::complex<double>> floor() const {
  return _mm512_floor_pd(values);
}
// Negation as 0 - z (keeps IEEE zero-sign semantics of subtraction).
Vectorized<c10::complex<double>> neg() const {
  auto zero = _mm512_setzero_pd();
  return _mm512_sub_pd(zero, values);
}
// Round half to even, without raising FP exceptions.
Vectorized<c10::complex<double>> round() const {
  return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
}
|
| 327 |
+
Vectorized<c10::complex<double>> tan() const {
  return map(std::tan);
}
Vectorized<c10::complex<double>> tanh() const {
  return map(std::tanh);
}
// Truncate both slots toward zero, without raising FP exceptions.
Vectorized<c10::complex<double>> trunc() const {
  return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
}
// Principal complex square root via the scalar fallback.
Vectorized<c10::complex<double>> sqrt() const {
  return map(std::sqrt);
}
|
| 339 |
+
// reciprocal() is defined out-of-line below so it can reuse abs_2_().
Vectorized<c10::complex<double>> reciprocal() const;
// 1/sqrt(z), composed from the two primitives above.
Vectorized<c10::complex<double>> rsqrt() const {
  return sqrt().reciprocal();
}
|
| 343 |
+
// Element-wise complex power x**y via the scalar std::pow fallback:
// spill both operands to aligned buffers, combine, reload.
Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
  __at_align__ c10::complex<double> x_tmp[size()];
  __at_align__ c10::complex<double> y_tmp[size()];
  store(x_tmp);
  exp.store(y_tmp);
  for (const auto i : c10::irange(size())) {
    x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
  }
  return loadu(x_tmp);
}
|
| 353 |
+
// Comparison using the _CMP_**_OQ predicate.
|
| 354 |
+
// `O`: get false if an operand is NaN
|
| 355 |
+
// `Q`: do not raise if an operand is NaN
|
| 356 |
+
Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
|
| 357 |
+
auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ);
|
| 358 |
+
return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
|
| 359 |
+
0xFFFFFFFFFFFFFFFF));
|
| 360 |
+
}
|
| 361 |
+
Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
|
| 362 |
+
auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ);
|
| 363 |
+
return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
|
| 364 |
+
0xFFFFFFFFFFFFFFFF));
|
| 365 |
+
}
|
| 366 |
+
// Complex numbers have no total order; all four relational operators
// fail loudly instead of silently returning garbage.
Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>& other) const {
  TORCH_CHECK(false, "not supported for complex numbers");
}
Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>& other) const {
  TORCH_CHECK(false, "not supported for complex numbers");
}
Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>& other) const {
  TORCH_CHECK(false, "not supported for complex numbers");
}
Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>& other) const {
  TORCH_CHECK(false, "not supported for complex numbers");
}
|
| 378 |
+
|
| 379 |
+
Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
|
| 380 |
+
Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
|
| 381 |
+
};
|
| 382 |
+
|
| 383 |
+
// Complex add/sub are plain element-wise double add/sub: real and imag
// slots never interact.
template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a,
                                                              const Vectorized<c10::complex<double>> &b) {
  return _mm512_add_pd(a, b);
}

template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a,
                                                              const Vectorized<c10::complex<double>> &b) {
  return _mm512_sub_pd(a, b);
}
|
| 392 |
+
|
| 393 |
+
// (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a,
                                                              const Vectorized<c10::complex<double>> &b) {
  const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
  auto ac_bd = _mm512_mul_pd(a, b);                // ac   bd

  auto d_c = _mm512_permute_pd(b, 0x55);           // d    c
  d_c = _mm512_xor_pd(sign_mask, d_c);             // d   -c
  auto ad_bc = _mm512_mul_pd(a, d_c);              // ad  -bc

  // Horizontal subtract pairs up the partial products:
  //   real = ac - bd, imag = ad - (-bc) = ad + bc
  auto ret = Vectorized<c10::complex<double>>::hsub_pd(ac_bd, ad_bc);
  return ret;
}
|
| 406 |
+
|
| 407 |
+
// Complex division with Smith-style scaling to reduce overflow/underflow:
//   re + im*i = (a + bi) / (c + di)
// Both operands are pre-scaled by 1/max(|c|,|d|) before the usual
// (ac+bd, bc-ad) / (c^2+d^2) formula is applied.
template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a,
                                                              const Vectorized<c10::complex<double>> &b) {
  // Fix: use a double literal (-0.0) rather than the float literal -0.f
  // that was silently promoted; the value is identical (sign-bit mask).
  auto mask = _mm512_set1_pd(-0.0);
  auto fabs_cd = _mm512_andnot_pd(mask, b);                     // |c|    |d|
  auto fabs_dc = _mm512_permute_pd(fabs_cd, 0x55);              // |d|    |c|
  // Approximate reciprocal of the per-pair max magnitude (the scale).
  auto scale = _mm512_rcp14_pd(_mm512_max_pd(fabs_cd, fabs_dc)); // 1/sc  1/sc
  auto a2 = _mm512_mul_pd(a, scale);                            // a/sc   b/sc
  auto b2 = _mm512_mul_pd(b, scale);                            // c/sc   d/sc
  auto acbd2 = _mm512_mul_pd(a2, b2);                           // ac/sc^2 bd/sc^2

  const __m512d sign_mask = _mm512_setr_pd(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
  auto dc2 = _mm512_permute_pd(b2, 0x55);                       // d/sc   c/sc
  dc2 = _mm512_xor_pd(sign_mask, dc2);                          // -d/sc  c/sc
  auto adbc2 = _mm512_mul_pd(a2, dc2);                          // -ad/sc^2  bc/sc^2
  auto res2 = Vectorized<c10::complex<double>>::hadd_pd(acbd2, adbc2); // (ac+bd)/sc^2 (bc-ad)/sc^2

  // Denominator: |c/sc + (d/sc)i|^2, broadcast into both slots.
  auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_();  // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
  res2 = _mm512_div_pd(res2, denom2);
  return res2;
}
|
| 429 |
+
|
| 430 |
+
// reciprocal. Implement this here so we can use multiplication.
|
| 431 |
+
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
|
| 432 |
+
//re + im*i = (a + bi) / (c + di)
|
| 433 |
+
//re = (ac + bd)/abs_2() = c/abs_2()
|
| 434 |
+
//im = (bc - ad)/abs_2() = d/abs_2()
|
| 435 |
+
const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
|
| 436 |
+
auto c_d = _mm512_xor_pd(sign_mask, values); //c -d
|
| 437 |
+
return _mm512_div_pd(c_d, abs_2_());
|
| 438 |
+
}
|
| 439 |
+
|
| 440 |
+
// atan(z) = i/2 * ln((i + z)/(i - z)); both constants live only in the
// imag slots of each pair.
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
  const __m512d i = _mm512_setr_pd(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
  const Vectorized i_half = _mm512_setr_pd(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);

  auto sum = Vectorized(_mm512_add_pd(i, values));  // a    1+b
  auto sub = Vectorized(_mm512_sub_pd(i, values));  // -a   1-b
  auto ln = (sum/sub).log();                        // ln((i + z)/(i - z))
  return i_half*ln;                                 // i/2*ln()
}
|
| 450 |
+
|
| 451 |
+
template <>
|
| 452 |
+
Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a,
|
| 453 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 454 |
+
auto zero_vec = _mm512_set1_epi64(0);
|
| 455 |
+
auto abs_a = a.abs_2_();
|
| 456 |
+
auto abs_b = b.abs_2_();
|
| 457 |
+
auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_LT_OQ);
|
| 458 |
+
auto max = _mm512_mask_blend_pd(mask, a, b);
|
| 459 |
+
// Exploit the fact that all-ones is a NaN.
|
| 460 |
+
auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
|
| 461 |
+
auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
|
| 462 |
+
0xFFFFFFFFFFFFFFFF);
|
| 463 |
+
return _mm512_or_pd(max, _mm512_castsi512_pd(isnan));
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
template <>
|
| 467 |
+
Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a,
|
| 468 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 469 |
+
auto zero_vec = _mm512_set1_epi64(0);
|
| 470 |
+
auto abs_a = a.abs_2_();
|
| 471 |
+
auto abs_b = b.abs_2_();
|
| 472 |
+
auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_GT_OQ);
|
| 473 |
+
auto min = _mm512_mask_blend_pd(mask, a, b);
|
| 474 |
+
// Exploit the fact that all-ones is a NaN.
|
| 475 |
+
auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
|
| 476 |
+
auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
|
| 477 |
+
0xFFFFFFFFFFFFFFFF);
|
| 478 |
+
return _mm512_or_pd(min, _mm512_castsi512_pd(isnan));
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
template <>
|
| 482 |
+
Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a,
|
| 483 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 484 |
+
return _mm512_and_pd(a, b);
|
| 485 |
+
}
|
| 486 |
+
|
| 487 |
+
template <>
|
| 488 |
+
Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a,
|
| 489 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 490 |
+
return _mm512_or_pd(a, b);
|
| 491 |
+
}
|
| 492 |
+
|
| 493 |
+
template <>
|
| 494 |
+
Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a,
|
| 495 |
+
const Vectorized<c10::complex<double>>& b) {
|
| 496 |
+
return _mm512_xor_pd(a, b);
|
| 497 |
+
}
|
| 498 |
+
|
| 499 |
+
// Whole-complex equality: collapse the per-slot masks from operator==
// and normalize the result to 0.0/1.0 by masking with a vector of 1.0s.
inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
  auto eq = (*this == other);  // compares real and imag individually
  // If both real numbers and imag numbers are equal, then the complex numbers are equal
  return (eq.real() & eq.imag()) & Vectorized<c10::complex<double>>(_mm512_set1_pd(1.0));
}

inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
  auto ne = (*this != other);  // compares real and imag individually
  // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
  return (ne.real() | ne.imag()) & Vectorized<c10::complex<double>>(_mm512_set1_pd(1.0));
}
|
| 510 |
+
|
| 511 |
+
#endif
|
| 512 |
+
|
| 513 |
+
}}}
|